diff --git a/go.mod b/go.mod index 2288c4a9..8632e3fb 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,8 @@ require ( github.com/google/gops v0.3.27 github.com/jessevdk/go-flags v1.5.0 github.com/miekg/dns v1.1.50 - github.com/onsi/ginkgo/v2 v2.13.2 - github.com/onsi/gomega v1.29.0 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.16.0 github.com/pyroscope-io/client v0.7.1 @@ -29,17 +29,17 @@ require ( go.opentelemetry.io/otel/trace v1.14.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.uber.org/zap v1.25.0 - golang.org/x/net v0.21.0 + golang.org/x/net v0.26.0 google.golang.org/grpc v1.61.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.3 - k8s.io/apimachinery v0.26.3 + k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.26.3 - k8s.io/code-generator v0.26.3 - k8s.io/klog/v2 v2.90.1 - k8s.io/utils v0.0.0-20230209194617-a36077c30491 + k8s.io/code-generator v0.31.1 + k8s.io/klog/v2 v2.130.1 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/controller-runtime v0.14.6 sigs.k8s.io/controller-tools v0.10.0 ) @@ -57,7 +57,7 @@ require ( github.com/go-openapi/swag v0.22.6 github.com/go-openapi/validate v0.22.1 github.com/go-swagger/go-swagger v0.30.4 - k8s.io/kube-openapi v0.0.0-20221110221610-a28e98eb7c70 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect ) require ( @@ -67,7 +67,7 @@ require ( github.com/ii2day/connexus v0.1.10 github.com/shirou/gopsutil v3.21.11+incompatible k8s.io/apiserver v0.26.3 - sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/yaml v1.4.0 ) require ( @@ -88,28 +88,30 @@ require ( github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fatih/color v1.14.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v0.2.5 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/cel-go v0.12.6 // indirect github.com/google/gnostic v0.6.9 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect @@ -146,7 +148,7 @@ require ( github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/pyroscope-io/godeltaprof v0.1.0 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect @@ -159,22 +161,23 @@ require ( github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.etcd.io/etcd/api/v3 v3.5.10 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect go.etcd.io/etcd/client/v3 v3.5.10 // indirect go.mongodb.org/mongo-driver v1.13.1 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/crypto v0.19.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.17.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.15.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect @@ -185,10 +188,10 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/apiextensions-apiserver v0.26.3 // indirect k8s.io/component-base v0.26.3 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect k8s.io/kms v0.26.3 // indirect k8s.io/kubectl v0.26.3 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 71b4ace4..fbac3b5b 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane 
v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -129,15 +129,15 @@ github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= @@ -186,8 +186,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-swagger/go-swagger v0.30.4 h1:cPrWLSXY6ZdcgfRicOj0lANg72TkTHz6uv/OlUdzO5U= github.com/go-swagger/go-swagger v0.30.4/go.mod h1:YM5D5kR9c1ft3ynMXvDk2uo/7UZHKFEqKXcAL9f4Phc= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -250,8 +250,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -261,6 +261,8 @@ github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -271,10 +273,10 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gops v0.3.27 h1:BDdWfedShsBbeatZ820oA4DbVOC8yJ4NI8xAlDFWfgI= @@ -288,13 +290,13 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= -github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -399,10 +401,10 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= -github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -442,8 +444,8 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= @@ -494,8 +496,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4 
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -506,6 +508,8 @@ github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= @@ -588,8 +592,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -624,8 +628,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -661,8 +665,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -683,8 +687,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -729,13 +733,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -747,8 
+751,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -793,7 +797,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -805,8 +808,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= -golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -906,8 +909,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= 
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -946,29 +949,28 @@ k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= k8s.io/apiextensions-apiserver v0.26.3 h1:5PGMm3oEzdB1W/FTMgGIDmm100vn7IaUP5er36dB+YE= k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN6VJrElV3sfpQ= -k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= -k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apiserver v0.26.3 h1:blBpv+yOiozkPH2aqClhJmJY+rp53Tgfac4SKPDJnU4= k8s.io/apiserver v0.26.3/go.mod h1:CJe/VoQNcXdhm67EvaVjYXxR3QyfwpceKPuPaeLibTA= k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= -k8s.io/code-generator v0.26.3 h1:DNYPsWoeFwmg4qFg97Z1cHSSv7KSG10mAEIFoZGTQM8= -k8s.io/code-generator v0.26.3/go.mod h1:ryaiIKwfxEJEaywEzx3dhWOydpVctKYbqLajJf0O8dI= +k8s.io/code-generator v0.31.1 h1:GvkRZEP2g2UnB2QKT2Dgc/kYxIkDxCHENv2Q1itioVs= +k8s.io/code-generator v0.31.1/go.mod h1:oL2ky46L48osNqqZAeOcWWy0S5BXj50vVdwOtTefqIs= k8s.io/component-base v0.26.3 h1:oC0WMK/ggcbGDTkdcqefI4wIZRYdK3JySx9/HADpV0g= k8s.io/component-base v0.26.3/go.mod h1:5kj1kZYwSC6ZstHJN7oHBqcJC6yyn41eR+Sqa/mQc8E= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kms v0.26.3 h1:+rC4BMeMBkH5hrfZt9WFMRrs2m3vY2rXymisNactcTY= k8s.io/kms v0.26.3/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg= -k8s.io/kube-openapi v0.0.0-20221110221610-a28e98eb7c70 h1:zfqQc1V6/ZgGpvrOVvr62OjiqQX4lZjfznK34NQwkqw= -k8s.io/kube-openapi v0.0.0-20221110221610-a28e98eb7c70/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.26.3 h1:bZ5SgFyeEXw6XTc1Qji0iNdtqAC76lmeIIQULg2wNXM= k8s.io/kubectl v0.26.3/go.mod h1:02+gv7Qn4dupzN3fi/9OvqqdW+uG/4Zi56vc4Zmsp1g= -k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= -k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -978,10 +980,9 @@ sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92 sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= sigs.k8s.io/controller-tools v0.10.0 h1:0L5DTDTFB67jm9DkfrONgTGmfc/zYow0ZaHyppizU2U= sigs.k8s.io/controller-tools v0.10.0/go.mod h1:uvr0EW6IsprfB0jpQq6evtKy+hHyHCXNfdWI5ONPx94= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 74a37815..5edd5a7c 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,10 +1,30 @@ # Change history of go-restful -## [v3.9.0] - 20221-07-21 +## [v3.11.0] - 2023-08-19 + +- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. + +## [v3.10.2] - 2023-03-09 - DO NOT USE + +- introduced MergePathStrategy to be able to revert behaviour of path concatenation to 3.9.0 + see comment in Readme how to customize this behaviour. 
+ +## [v3.10.1] - 2022-11-19 - DO NOT USE + +- fix broken 3.10.0 by using path package for joining paths + +## [v3.10.0] - 2022-10-11 - BROKEN + +- changed tokenizer to match std route match behavior; do not trimright the path (#511) +- Add MIME_ZIP (#512) +- Add MIME_ZIP and HEADER_ContentDisposition (#513) +- Changed how to get query parameter issue #510 + +## [v3.9.0] - 2022-07-21 - add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) -## [v3.8.0] - 20221-06-06 +## [v3.8.0] - 2022-06-06 - use exact matching of allowed domain entries, issue #489 (#493) - this changes fixes [security] Authorization Bypass Through User-Controlled Key diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 0625359d..e3e30080 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -79,7 +79,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Content encoding (gzip,deflate) of request and response payloads - Automatic responses on OPTIONS (using a filter) - Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) +- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi)) - Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging @@ -96,6 +96,7 @@ There are several hooks to customize the behavior of the go-restful package. - Compression - Encoders for other serializers - Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources @@ -108,4 +109,4 @@ There are several hooks to customize the behavior of the go-restful package. Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2023, http://ernestmicklei.com. MIT License. Contributions are welcome. 
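The go-restful bump (v3.9.0 to v3.11.0) is the main behavioral change in this vendor update: v3.11.0 restores the pre-v3.10 trailing-slash handling and exposes the package variable `TrimRightSlashEnabled` (default `true`) for switching path-concatenation strategies. Below is a minimal sketch of a consumer opting back into the v3.10.x `path.Join` behavior; the service path and handler are illustrative and not part of this change.

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	// Default (true) keeps the <= v3.9.0 behavior that v3.11.0 restores:
	// trailing slashes are trimmed when routes are concatenated and tokenized.
	// Setting it to false opts into the v3.10.x path.Join strategy, where
	// "/users" and "/users/" can match differently. Must be set before
	// WebServices and Routes are built.
	restful.TrimRightSlashEnabled = false

	ws := new(restful.WebService)
	ws.Path("/users")
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		_ = resp.WriteEntity(map[string]string{"status": "ok"})
	}))
	restful.Add(ws)

	_ = http.ListenAndServe(":8080", nil)
}
```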
diff --git a/vendor/github.com/emicklei/go-restful/v3/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go index 203439c5..2328bde6 100644 --- a/vendor/github.com/emicklei/go-restful/v3/constants.go +++ b/vendor/github.com/emicklei/go-restful/v3/constants.go @@ -7,12 +7,14 @@ package restful const ( MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces() MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default HEADER_Allow = "Allow" HEADER_Accept = "Accept" HEADER_Origin = "Origin" HEADER_ContentType = "Content-Type" + HEADER_ContentDisposition = "Content-Disposition" HEADER_LastModified = "Last-Modified" HEADER_AcceptEncoding = "Accept-Encoding" HEADER_ContentEncoding = "Content-Encoding" diff --git a/vendor/github.com/emicklei/go-restful/v3/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go index 5725a075..0020095e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request { // a "Unable to unmarshal content of type:" response is returned. // Valid values are restful.MIME_JSON and restful.MIME_XML // Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) +// +// restful.DefaultRequestContentType(restful.MIME_JSON) func DefaultRequestContentType(mime string) { defaultRequestContentType = mime } @@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string { // QueryParameter returns the (first) Query parameter value by its name func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) + return r.Request.URL.Query().Get(name) } // QueryParameters returns the all the query parameters values by name diff --git a/vendor/github.com/emicklei/go-restful/v3/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go index 8f0b56aa..a41a92cc 100644 --- a/vendor/github.com/emicklei/go-restful/v3/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) { if DefaultResponseMimeType == MIME_XML { return entityAccessRegistry.accessorAt(MIME_XML) } + if DefaultResponseMimeType == MIME_ZIP { + return entityAccessRegistry.accessorAt(MIME_ZIP) + } // Fallback to whatever the route says it can produce. // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html for _, each := range r.routeProduces { diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 193f4a6b..306c44be 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -40,7 +40,8 @@ type Route struct { ParameterDocs []*Parameter ResponseErrors map[int]ResponseError DefaultResponse *ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload + ReadSample, WriteSample interface{} // structs that model an example request or response payload + WriteSamples []interface{} // if more than one return types is possible (oneof) then this will contain multiple values // Extra information used to store custom information about the route. 
Metadata map[string]interface{} @@ -164,7 +165,13 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.Trim(path, "/"), "/") + if TrimRightSlashEnabled { + // 3.9.0 + return strings.Split(strings.Trim(path, "/"), "/") + } else { + // 3.10.2 + return strings.Split(strings.TrimLeft(path, "/"), "/") + } } // for debugging @@ -176,3 +183,9 @@ func (r *Route) String() string { func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } + +// TrimRightSlashEnabled controls whether +// - path on route building is using path.Join +// - the path of the incoming request is trimmed of its slash suffux. +// Value of true matches the behavior of <= 3.9.0 +var TrimRightSlashEnabled = true diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 23641b6d..75168c12 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -7,6 +7,7 @@ package restful import ( "fmt" "os" + "path" "reflect" "runtime" "strings" @@ -30,27 +31,29 @@ type RouteBuilder struct { typeNameHandleFunc TypeNameHandleFunction // required // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError - defaultResponse *ResponseError - metadata map[string]interface{} - extensions map[string]interface{} - deprecated bool - contentEncodingEnabled *bool + doc string + notes string + operation string + readSample interface{} + writeSamples []interface{} + parameters []*Parameter + errorMap map[int]ResponseError + defaultResponse *ResponseError + metadata map[string]interface{} + extensions map[string]interface{} + deprecated bool + contentEncodingEnabled *bool } // Do evaluates each argument with the RouteBuilder itself. // This allows you to follow DRY principles without breaking the fluent programming style. // Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) // -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { for _, each := range oneArgBlocks { each(b) @@ -133,9 +136,9 @@ func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { return p } -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample +// Writes tells which one of the resource types will be written as the response payload. Optional. 
+func (b *RouteBuilder) Writes(samples ...interface{}) *RouteBuilder { + b.writeSamples = samples // oneof return b } @@ -340,19 +343,29 @@ func (b *RouteBuilder) Build() Route { ResponseErrors: b.errorMap, DefaultResponse: b.defaultResponse, ReadSample: b.readSample, - WriteSample: b.writeSample, + WriteSamples: b.writeSamples, Metadata: b.metadata, Deprecated: b.deprecated, contentEncodingEnabled: b.contentEncodingEnabled, allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } + // set WriteSample if one specified + if len(b.writeSamples) == 1 { + route.WriteSample = b.writeSamples[0] + } route.Extensions = b.extensions route.postBuild() return route } -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") +// merge two paths using the current (package global) merge path strategy. +func concatPath(rootPath, routePath string) string { + + if TrimRightSlashEnabled { + return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/") + } else { + return path.Join(rootPath, routePath) + } } var anonymousFuncCount int32 diff --git a/vendor/github.com/fxamacker/cbor/v2/.gitignore b/vendor/github.com/fxamacker/cbor/v2/.gitignore new file mode 100644 index 00000000..f1c181ec --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/fxamacker/cbor/v2/.golangci.yml b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml new file mode 100644 index 00000000..38cb9ae1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/.golangci.yml @@ -0,0 +1,104 @@ +# Do not delete linter settings. Linters like gocritic can be enabled on the command line. + +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: strict + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/x448/float16 + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + ignore-tests: true + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - dupImport # https://github.com/go-critic/go-critic/issues/845 + - ifElseChain + - octalLiteral + - paramTypeCombine + - whyNoLint + gofmt: + simplify: false + goimports: + local-prefixes: github.com/fxamacker/cbor + golint: + min-confidence: 0 + govet: + check-shadowing: true + lll: + line-length: 140 + maligned: + suggest-new: true + misspell: + locale: US + staticcheck: + checks: ["all"] + +linters: + disable-all: true + enable: + - asciicheck + - bidichk + - depguard + - errcheck + - exportloopref + - goconst + - gocritic + - gocyclo + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nilerr + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unused + +issues: + # max-issues-per-linter default is 50. Set to 0 to disable limit. + max-issues-per-linter: 0 + # max-same-issues default is 3. Set to 0 to disable limit. 
+ max-same-issues: 0 + + exclude-rules: + - path: decode.go + text: "string ` overflows ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `, ` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant" + - path: decode.go + text: "string `\\]\\)` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string ` for type ` has (\\d+) occurrences, make it a constant" + - path: valid.go + text: "string `cbor: ` has (\\d+) occurrences, make it a constant" diff --git a/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..c794b2b0 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +faye.github@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md new file mode 100644 index 00000000..de0965e1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md @@ -0,0 +1,41 @@ +# How to contribute + +You can contribute by using the library, opening issues, or opening pull requests. 
+ +## Bug reports and security vulnerabilities + +Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues). + +To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy). + +Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me. + +## Pull requests + +Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc. + +Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts. + +See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details. + +Pull requests have a greater chance of being approved if: +- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature. +- it has > 97% code coverage. + +## Describe your issue + +Clearly describe the issue: +* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error. +* If you propose a change or addition, try to give an example how the improved code could look like or how to use it. +* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message. + +## Please don't + +Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me. + +Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me. + +## Credits + +- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22. +- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements. diff --git a/vendor/github.com/fxamacker/cbor/v2/LICENSE b/vendor/github.com/fxamacker/cbor/v2/LICENSE new file mode 100644 index 00000000..eaa85049 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md new file mode 100644 index 00000000..af0a7950 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/README.md @@ -0,0 +1,691 @@ +# CBOR Codec in Go + + + +[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html). + +CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc.  CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades. + +`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor). + +See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer. + +## fxamacker/cbor + +[![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci) +[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22) +[![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml) +[![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage) +[![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor) + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc. + +Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc. + +
Highlights

+ +__🚀  Speed__ + +Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data. + +__🔒  Security__ + +Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation. + +__🗜️  Data Size__ + +Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. + +__:jigsaw:  Usability__ + +API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines. + +Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc. + +__📆  Extensibility__ + +Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library. + +


+ +
+ +### Secure Decoding with Configurable Settings + +`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. + +By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). + +
Example decoding with encoding/gob 💥 fatal error (out of memory)

+ +```Go +// Example of encoding/gob having "fatal error: runtime: out of memory" +// while decoding 181 bytes. +package main +import ( + "bytes" + "encoding/gob" + "encoding/hex" + "fmt" +) + +// Example data is from https://github.com/golang/go/issues/24446 +// (shortened to 181 bytes). +const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + + "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + + "860001013001ff860001013001ffb80000001eff850401010e3030303030" + + "30303030303030303001ff3000010c0104000016ffb70201010830303030" + + "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + + "303030303030303030303030303030303030303030303030303030303030" + + "30" + +type X struct { + J *X + K map[string]int +} + +func main() { + raw, _ := hex.DecodeString(data) + decoder := gob.NewDecoder(bytes.NewReader(raw)) + + var x X + decoder.Decode(&x) // fatal error: runtime: out of memory + fmt.Println("Decoding finished.") +} +``` + +
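+
+For contrast, here is a minimal sketch (added here for illustration, not part of the upstream example) that feeds this library the 10-byte malicious CBOR input used in the benchmark below; with default settings the decoder is expected to return an error quickly instead of exhausting memory:
+
+```go
+// Sketch: decoding malicious CBOR with fxamacker/cbor (default settings).
+// The input declares an absurdly large array and then truncates.
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// 0x9b starts an array with an 8-byte length argument; the claimed
+	// length is huge and the data ends right after the header.
+	malicious := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}
+
+	var v []byte
+	err := cbor.Unmarshal(malicious, &v) // rejected as malformed, no crash
+	fmt.Println("error:", err)
+}
+```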


+ +
+ +`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to +decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | + +
Benchmark details

+ +Latest comparison used: +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) +- go test -bench=. -benchmem -count=20 + +#### Prior comparisons + +| Codec | Speed (ns/op) | Memory | Allocs | +| :---- | ------------: | -----: | -----: | +| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | +| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | +| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | +| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | + +- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` +- go1.19.6, linux/amd64, i5-13600K (DDR4) +- go test -bench=. -benchmem -count=20 + +


+ +
+ +### Smaller Encodings with Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +Example using different struct tags together: + +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options. + +## Quick Start + +__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`. + +### Key Points + +This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). + +- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items. +- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items. + +Configurable limits and options can be used to balance trade-offs. + +- Encoding and decoding modes are created from options (settings). +- Modes can be created at startup and reused. +- Modes are safe for concurrent use. + +### Default Mode + +Package level functions only use this library's default settings. +They provide the "default mode" of encoding and decoding. + +```go +// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc. +b, err = cbor.Marshal(v) // encode v to []byte b +err = cbor.Unmarshal(b, &v) // decode []byte b to v +decoder = cbor.NewDecoder(r) // create decoder with io.Reader r +err = decoder.Decode(&v) // decode a CBOR data item to v + +// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface. +err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool. + +// v2.5.0 added new functions that return remaining bytes. + +// UnmarshalFirst decodes first CBOR data item and returns remaining bytes. +rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v + +// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. +text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text + +// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, +// but new funcs UnmarshalFirst and DiagnoseFirst do not. +``` + +__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. + +- Different CBOR libraries may use different default settings. +- CBOR-based formats or protocols usually require specific settings. + +For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. + +### Presets + +Presets can be used as-is or as a starting point for custom settings. + +```go +// EncOptions is a struct of encoder settings. +func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding +func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization +func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR +func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR +``` + +Presets are used to create custom modes. + +### Custom Modes + +Modes are created from settings. Once created, modes have immutable settings. + +💡 Create the mode at startup and reuse it. It is safe for concurrent use. + +```Go +// Create encoding mode. +opts := cbor.CoreDetEncOptions() // use preset options as a starting point +opts.Time = cbor.TimeUnix // change any settings if needed +em, err := opts.EncMode() // create an immutable encoding mode + +// Reuse the encoding mode. It is safe for concurrent use. + +// API matches encoding/json. 
+b, err := em.Marshal(v) // encode v to []byte b +encoder := em.NewEncoder(w) // create encoder with io.Writer w +err := encoder.Encode(v) // encode v to io.Writer w +``` + +Default mode and custom modes automatically apply struct tags. + +### User Specified Buffer for Encoding (v2.7.0) + +`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool. + +```Go +em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode + +var buf bytes.Buffer +err = em.MarshalToBuffer(v, &buf) // encode v to provided buf +``` + +### Struct Tags + +Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. + +
Example encoding 3-level nested Go struct to 1 byte CBOR

+ +https://go.dev/play/p/YxwvfPdFQG2 + +```Go +// Example encoding nested struct (with omitempty tag) +// - encoding/json: 18 byte JSON +// - fxamacker/cbor: 1 byte CBOR +package main + +import ( + "encoding/hex" + "encoding/json" + "fmt" + + "github.com/fxamacker/cbor/v2" +) + +type GrandChild struct { + Quux int `json:",omitempty"` +} + +type Child struct { + Baz int `json:",omitempty"` + Qux GrandChild `json:",omitempty"` +} + +type Parent struct { + Foo Child `json:",omitempty"` + Bar int `json:",omitempty"` +} + +func cb() { + results, _ := cbor.Marshal(Parent{}) + fmt.Println("hex(CBOR): " + hex.EncodeToString(results)) + + text, _ := cbor.Diagnose(results) // Diagnostic Notation + fmt.Println("DN: " + text) +} + +func js() { + results, _ := json.Marshal(Parent{}) + fmt.Println("hex(JSON): " + hex.EncodeToString(results)) + + text := string(results) // JSON + fmt.Println("JSON: " + text) +} + +func main() { + cb() + fmt.Println("-------------") + js() +} +``` + +Output (DN is Diagnostic Notation): +``` +hex(CBOR): a0 +DN: {} +------------- +hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d +JSON: {"Foo":{"Qux":{}}} +``` + +


+ +
+ +
Example using several struct tags

+ +![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags") + +

+ +Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. + +### CBOR Tags + +CBOR tags are specified in a `TagSet`. + +Custom modes can be created with a `TagSet` to handle CBOR tags. + +```go +em, err := opts.EncMode() // no CBOR tags +em, err := opts.EncModeWithTags(ts) // immutable CBOR tags +em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags +``` + +`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. + +
Example using TagSet and TagOptions

+ +```go +// Use signedCWT struct defined in "Decoding CWT" example. + +// Create TagSet (safe for concurrency). +tags := cbor.NewTagSet() +// Register tag COSE_Sign1 18 with signedCWT type. +tags.Add( + cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired}, + reflect.TypeOf(signedCWT{}), + 18) + +// Create DecMode with immutable tags. +dm, _ := cbor.DecOptions{}.DecModeWithTags(tags) + +// Unmarshal to signedCWT with tag support. +var v signedCWT +if err := dm.Unmarshal(data, &v); err != nil { + return err +} + +// Create EncMode with immutable tags. +em, _ := cbor.EncOptions{}.EncModeWithTags(tags) + +// Marshal signedCWT with tag number. +if data, err := cbor.Marshal(v); err != nil { + return err +} +``` + +

+ +### Functions and Interfaces + +
Functions and interfaces at a glance

+
+Common functions with the same API as `encoding/json`:
+- `Marshal`, `Unmarshal`
+- `NewEncoder`, `(*Encoder).Encode`
+- `NewDecoder`, `(*Decoder).Decode`
+
+NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
+because RFC 8949 treats a CBOR data item with remaining bytes as malformed.
+- 💡 Use `UnmarshalFirst` to decode the first CBOR data item and return any remaining bytes.
+
+Other useful functions:
+- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
+- `UnmarshalFirst` decodes the first CBOR data item and returns any remaining bytes.
+- `Wellformed` checks whether the CBOR data item is well-formed.
+
+Interfaces identical or comparable to Go `encoding` packages include:
+`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
+
+The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.
+
+
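+
+As a small sketch (the two-item CBOR Sequence below is an illustrative input, not taken from the docs above), `UnmarshalFirst` and `Wellformed` can be used like this:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// A tiny CBOR Sequence: two data items, 1 (0x01) followed by 2 (0x02).
+	seq := []byte{0x01, 0x02}
+
+	var v interface{}
+	rest, err := cbor.UnmarshalFirst(seq, &v) // decodes only the first item
+	fmt.Println(v, rest, err)                 // 1 [2] <nil>
+
+	// Wellformed returns a non-nil error for malformed data; nil means well-formed.
+	fmt.Println(cbor.Wellformed(rest)) // <nil>
+}
+```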

+ +### Security Tips + +🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data. + +Default limits may need to be increased for systems handling very large data (e.g. blockchains). + +`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`. + +## Status + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. + +For more details, see [release notes](https://github.com/fxamacker/cbor/releases). + +### Prior Release + +[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. + +v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading. + +See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes. + +See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. + + + +## Who uses fxamacker/cbor + +`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others. + +`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope. + +## Standards + +`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)). + +Notable CBOR features include: + +| CBOR Feature | Description | +| :--- | :--- | +| CBOR tags | API supports built-in and user-defined tags. | +| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. | +| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). 
| +| Duplicate map keys | Always forbid for encoding and option to allow/forbid for decoding. | +| Indefinite length data | Option to allow/forbid for encoding and decoding. | +| Well-formedness | Always checked and enforced. | +| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. | +| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). | + +Known limitations are noted in the [Limitations section](#limitations). + +Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps. + +Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data. + +After well-formedness is verified, basic validity errors are handled as follows: + +* Invalid UTF-8 string: Decoder has option to check and return invalid UTF-8 string error. This check is enabled by default. +* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys. + +When decoding well-formed CBOR arrays and maps, decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future. + +By default, decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined. + +__Click to expand topic:__ + +
+ Duplicate Map Keys

+
+This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
+
+`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.
+
+`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.
+
+The APF suffix means "Allow Partial Fill", so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
+
+
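+
+For illustration, here is a sketch (not an excerpt from the docs above; the map bytes are a hand-encoded example) of creating a decoding mode that rejects duplicate map keys:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// Create a DecMode that detects and rejects duplicate map keys.
+	dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
+	if err != nil {
+		panic(err)
+	}
+
+	// CBOR map {"a": 1, "a": 2}: the text-string key "a" appears twice.
+	dup := []byte{0xa2, 0x61, 0x61, 0x01, 0x61, 0x61, 0x02}
+
+	var m map[string]int
+	err = dm.Unmarshal(dup, &m)
+	fmt.Println(err) // expect a *cbor.DupMapKeyError naming key "a"
+}
+```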

+ +
+ Tag Validity

+ +This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799): + +* Inadmissible type for tag content +* Inadmissible value for tag content + +Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways: + +* When decoding into an empty interface, unknown tag data item will be decoded into `cbor.Tag` data type, which contains tag number and tag content. The tag content will be decoded into the default Go data type for the CBOR data type. +* When decoding into other Go types, unknown tag data item is decoded into the specified Go type. If Go type is registered with a tag number, the tag number can optionally be verified. + +Decoder also has an option to forbid tag data items (treat any tag data item as error) which is specified by protocols such as CTAP2 Canonical CBOR. + +For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options). + +
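+
+As an illustrative sketch (tag number 100 below is arbitrary and unregistered, chosen only for this example), decoding an unrecognized tag into an empty interface yields a `cbor.Tag` holding the tag number and content:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+func main() {
+	// Tag number 100 (0xd8 0x64) wrapping the unsigned integer 1 (0x01).
+	data := []byte{0xd8, 0x64, 0x01}
+
+	var v interface{}
+	if err := cbor.Unmarshal(data, &v); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%#v\n", v) // e.g. cbor.Tag{Number:0x64, Content:0x1}
+}
+```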

+
+## Limitations
+
+If any of these limitations prevent you from using this library, please open an issue along with a link to your project.
+
+* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
+* CBOR map keys with data types not supported by Go for map keys are ignored and an error is returned after continuing to decode remaining items.
+* When decoding registered CBOR tag data to interface type, decoder creates a pointer to registered Go type matching CBOR tag number. Requiring a pointer for this is a Go limitation.
+
+## Fuzzing and Code Coverage
+
+__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.
+
+__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect the fuzz tests being used by this project.
+
+
+ +## Versions and API Changes +This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes. + +These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases: +`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`. + +Exclusions from SemVer: +- Newly added API documented as "subject to change". +- Newly added API in the master branch that has never been tagged in non-beta release. +- If function parameters are unchanged, bug fixes that change behavior (e.g. return error for edge case was missed in prior version). We try to highlight these in the release notes and add extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023). + +This project avoids breaking changes to behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.) Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions. + +## Code of Conduct + +This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments. + +## Contributing + +Please open an issue before beginning work on a PR. The improvement may have already been considered, etc. + +For more info, see [How to Contribute](CONTRIBUTING.md). + +## Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +For the full text of the Security Policy, see [SECURITY.md](SECURITY.md). + +## Acknowledgements + +Many thanks to all the contributors on this project! + +I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more. + +I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days. + +Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0. + +This library clearly wouldn't be possible without Carsten Bormann authoring CBOR RFCs. + +Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis). + +Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included! + +This library uses `x448/float16` which used to be included. As a standalone package, `x448/float16` is useful to other projects as well. + +## License + +Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker). + +fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text. + +
diff --git a/vendor/github.com/fxamacker/cbor/v2/SECURITY.md b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md new file mode 100644 index 00000000..9c05146d --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +Security fixes are provided for the latest released version of fxamacker/cbor. + +If the security vulnerability is already known to the public, then you can open an issue as a bug report. + +To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public. diff --git a/vendor/github.com/fxamacker/cbor/v2/bytestring.go b/vendor/github.com/fxamacker/cbor/v2/bytestring.go new file mode 100644 index 00000000..823bff12 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/bytestring.go @@ -0,0 +1,63 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "errors" +) + +// ByteString represents CBOR byte string (major type 2). ByteString can be used +// when using a Go []byte is not possible or convenient. For example, Go doesn't +// allow []byte as map key, so ByteString can be used to support data formats +// having CBOR map with byte string keys. ByteString can also be used to +// encode invalid UTF-8 string as CBOR byte string. +// See DecOption.MapKeyByteStringMode for more details. +type ByteString string + +// Bytes returns bytes representing ByteString. +func (bs ByteString) Bytes() []byte { + return []byte(bs) +} + +// MarshalCBOR encodes ByteString as CBOR byte string (major type 2). +func (bs ByteString) MarshalCBOR() ([]byte, error) { + e := getEncodeBuffer() + defer putEncodeBuffer(e) + + // Encode length + encodeHead(e, byte(cborTypeByteString), uint64(len(bs))) + + // Encode data + buf := make([]byte, e.Len()+len(bs)) + n := copy(buf, e.Bytes()) + copy(buf[n:], bs) + + return buf, nil +} + +// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString. +// Decoding CBOR null and CBOR undefined sets ByteString to be empty. +func (bs *ByteString) UnmarshalCBOR(data []byte) error { + if bs == nil { + return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and CBOR undefined to ByteString resets data. + // This behavior is similar to decoding CBOR null and CBOR undefined to []byte. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + *bs = "" + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Check if CBOR data type is byte string + if typ := d.nextCBORType(); typ != cborTypeByteString { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()} + } + + b, _ := d.parseByteString() + *bs = ByteString(b) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/cache.go b/vendor/github.com/fxamacker/cbor/v2/cache.go new file mode 100644 index 00000000..ea0f39e2 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/cache.go @@ -0,0 +1,363 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +type encodeFuncs struct { + ef encodeFunc + ief isEmptyFunc +} + +var ( + decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType + encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType + encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs + typeInfoCache sync.Map // map[reflect.Type]*typeInfo +) + +type specialType int + +const ( + specialTypeNone specialType = iota + specialTypeUnmarshalerIface + specialTypeEmptyIface + specialTypeIface + specialTypeTag + specialTypeTime +) + +type typeInfo struct { + elemTypeInfo *typeInfo + keyTypeInfo *typeInfo + typ reflect.Type + kind reflect.Kind + nonPtrType reflect.Type + nonPtrKind reflect.Kind + spclType specialType +} + +func newTypeInfo(t reflect.Type) *typeInfo { + tInfo := typeInfo{typ: t, kind: t.Kind()} + + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + k := t.Kind() + + tInfo.nonPtrType = t + tInfo.nonPtrKind = k + + if k == reflect.Interface { + if t.NumMethod() == 0 { + tInfo.spclType = specialTypeEmptyIface + } else { + tInfo.spclType = specialTypeIface + } + } else if t == typeTag { + tInfo.spclType = specialTypeTag + } else if t == typeTime { + tInfo.spclType = specialTypeTime + } else if reflect.PtrTo(t).Implements(typeUnmarshaler) { + tInfo.spclType = specialTypeUnmarshalerIface + } + + switch k { + case reflect.Array, reflect.Slice: + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + case reflect.Map: + tInfo.keyTypeInfo = getTypeInfo(t.Key()) + tInfo.elemTypeInfo = getTypeInfo(t.Elem()) + } + + return &tInfo +} + +type decodingStructType struct { + fields fields + fieldIndicesByName map[string]int + err error + toArray bool +} + +// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead, +// here's a very basic implementation of an aggregated error. 
+type multierror []error + +func (m multierror) Error() string { + var sb strings.Builder + for i, err := range m { + sb.WriteString(err.Error()) + if i < len(m)-1 { + sb.WriteString(", ") + } + } + return sb.String() +} + +func getDecodingStructType(t reflect.Type) *decodingStructType { + if v, _ := decodingStructTypeCache.Load(t); v != nil { + return v.(*decodingStructType) + } + + flds, structOptions := getFields(t) + + toArray := hasToArrayOption(structOptions) + + var errs []error + for i := 0; i < len(flds); i++ { + if flds[i].keyAsInt { + nameAsInt, numErr := strconv.Atoi(flds[i].name) + if numErr != nil { + errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")")) + break + } + flds[i].nameAsInt = int64(nameAsInt) + } + + flds[i].typInfo = getTypeInfo(flds[i].typ) + } + + fieldIndicesByName := make(map[string]int, len(flds)) + for i, fld := range flds { + if _, ok := fieldIndicesByName[fld.name]; ok { + errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name)) + continue + } + fieldIndicesByName[fld.name] = i + } + + var err error + { + var multi multierror + for _, each := range errs { + if each != nil { + multi = append(multi, each) + } + } + if len(multi) == 1 { + err = multi[0] + } else if len(multi) > 1 { + err = multi + } + } + + structType := &decodingStructType{ + fields: flds, + fieldIndicesByName: fieldIndicesByName, + err: err, + toArray: toArray, + } + decodingStructTypeCache.Store(t, structType) + return structType +} + +type encodingStructType struct { + fields fields + bytewiseFields fields + lengthFirstFields fields + omitEmptyFieldsIdx []int + err error + toArray bool +} + +func (st *encodingStructType) getFields(em *encMode) fields { + switch em.sort { + case SortNone, SortFastShuffle: + return st.fields + case SortLengthFirst: + return st.lengthFirstFields + default: + return st.bytewiseFields + } +} + +type bytewiseFieldSorter struct { + fields fields +} + +func (x *bytewiseFieldSorter) Len() int { + return len(x.fields) +} + +func (x *bytewiseFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *bytewiseFieldSorter) Less(i, j int) bool { + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +type lengthFirstFieldSorter struct { + fields fields +} + +func (x *lengthFirstFieldSorter) Len() int { + return len(x.fields) +} + +func (x *lengthFirstFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *lengthFirstFieldSorter) Less(i, j int) bool { + if len(x.fields[i].cborName) != len(x.fields[j].cborName) { + return len(x.fields[i].cborName) < len(x.fields[j].cborName) + } + return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0 +} + +func getEncodingStructType(t reflect.Type) (*encodingStructType, error) { + if v, _ := encodingStructTypeCache.Load(t); v != nil { + structType := v.(*encodingStructType) + return structType, structType.err + } + + flds, structOptions := getFields(t) + + if hasToArrayOption(structOptions) { + return getEncodingStructToArrayType(t, flds) + } + + var err error + var hasKeyAsInt bool + var hasKeyAsStr bool + var omitEmptyIdx []int + e := getEncodeBuffer() + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + err = &UnsupportedTypeError{t} + break + } + + // Encode field name + if flds[i].keyAsInt { + nameAsInt, numErr := 
strconv.Atoi(flds[i].name) + if numErr != nil { + err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")") + break + } + flds[i].nameAsInt = int64(nameAsInt) + if nameAsInt >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt)) + } else { + n := nameAsInt*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(n)) + } + flds[i].cborName = make([]byte, e.Len()) + copy(flds[i].cborName, e.Bytes()) + e.Reset() + + hasKeyAsInt = true + } else { + encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name))) + flds[i].cborName = make([]byte, e.Len()+len(flds[i].name)) + n := copy(flds[i].cborName, e.Bytes()) + copy(flds[i].cborName[n:], flds[i].name) + e.Reset() + + // If cborName contains a text string, then cborNameByteString contains a + // string that has the byte string major type but is otherwise identical to + // cborName. + flds[i].cborNameByteString = make([]byte, len(flds[i].cborName)) + copy(flds[i].cborNameByteString, flds[i].cborName) + // Reset encoded CBOR type to byte string, preserving the "additional + // information" bits: + flds[i].cborNameByteString[0] = byte(cborTypeByteString) | + getAdditionalInformation(flds[i].cborNameByteString[0]) + + hasKeyAsStr = true + } + + // Check if field can be omitted when empty + if flds[i].omitEmpty { + omitEmptyIdx = append(omitEmptyIdx, i) + } + } + putEncodeBuffer(e) + + if err != nil { + structType := &encodingStructType{err: err} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + + // Sort fields by canonical order + bytewiseFields := make(fields, len(flds)) + copy(bytewiseFields, flds) + sort.Sort(&bytewiseFieldSorter{bytewiseFields}) + + lengthFirstFields := bytewiseFields + if hasKeyAsInt && hasKeyAsStr { + lengthFirstFields = make(fields, len(flds)) + copy(lengthFirstFields, flds) + sort.Sort(&lengthFirstFieldSorter{lengthFirstFields}) + } + + structType := &encodingStructType{ + fields: flds, + bytewiseFields: bytewiseFields, + lengthFirstFields: lengthFirstFields, + omitEmptyFieldsIdx: omitEmptyIdx, + } + + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) { + for i := 0; i < len(flds); i++ { + // Get field's encodeFunc + flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ) + if flds[i].ef == nil { + structType := &encodingStructType{err: &UnsupportedTypeError{t}} + encodingStructTypeCache.Store(t, structType) + return structType, structType.err + } + } + + structType := &encodingStructType{ + fields: flds, + toArray: true, + } + encodingStructTypeCache.Store(t, structType) + return structType, structType.err +} + +func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) { + if v, _ := encodeFuncCache.Load(t); v != nil { + fs := v.(encodeFuncs) + return fs.ef, fs.ief + } + ef, ief := getEncodeFuncInternal(t) + encodeFuncCache.Store(t, encodeFuncs{ef, ief}) + return ef, ief +} + +func getTypeInfo(t reflect.Type) *typeInfo { + if v, _ := typeInfoCache.Load(t); v != nil { + return v.(*typeInfo) + } + tInfo := newTypeInfo(t) + typeInfoCache.Store(t, tInfo) + return tInfo +} + +func hasToArrayOption(tag string) bool { + s := ",toarray" + idx := strings.Index(tag, s) + return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',') +} diff --git a/vendor/github.com/fxamacker/cbor/v2/common.go b/vendor/github.com/fxamacker/cbor/v2/common.go new file mode 100644 index 00000000..ec038a49 
--- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/common.go @@ -0,0 +1,182 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "fmt" + "strconv" +) + +type cborType uint8 + +const ( + cborTypePositiveInt cborType = 0x00 + cborTypeNegativeInt cborType = 0x20 + cborTypeByteString cborType = 0x40 + cborTypeTextString cborType = 0x60 + cborTypeArray cborType = 0x80 + cborTypeMap cborType = 0xa0 + cborTypeTag cborType = 0xc0 + cborTypePrimitives cborType = 0xe0 +) + +func (t cborType) String() string { + switch t { + case cborTypePositiveInt: + return "positive integer" + case cborTypeNegativeInt: + return "negative integer" + case cborTypeByteString: + return "byte string" + case cborTypeTextString: + return "UTF-8 text string" + case cborTypeArray: + return "array" + case cborTypeMap: + return "map" + case cborTypeTag: + return "tag" + case cborTypePrimitives: + return "primitives" + default: + return "Invalid type " + strconv.Itoa(int(t)) + } +} + +type additionalInformation uint8 + +const ( + maxAdditionalInformationWithoutArgument = 23 + additionalInformationWith1ByteArgument = 24 + additionalInformationWith2ByteArgument = 25 + additionalInformationWith4ByteArgument = 26 + additionalInformationWith8ByteArgument = 27 + + // For major type 7. + additionalInformationAsFalse = 20 + additionalInformationAsTrue = 21 + additionalInformationAsNull = 22 + additionalInformationAsUndefined = 23 + additionalInformationAsFloat16 = 25 + additionalInformationAsFloat32 = 26 + additionalInformationAsFloat64 = 27 + + // For major type 2, 3, 4, 5. + additionalInformationAsIndefiniteLengthFlag = 31 +) + +const ( + maxSimpleValueInAdditionalInformation = 23 + minSimpleValueIn1ByteArgument = 32 +) + +func (ai additionalInformation) isIndefiniteLength() bool { + return ai == additionalInformationAsIndefiniteLengthFlag +} + +const ( + // From RFC 8949 Section 3: + // "The initial byte of each encoded data item contains both information about the major type + // (the high-order 3 bits, described in Section 3.1) and additional information + // (the low-order 5 bits)." + + // typeMask is used to extract major type in initial byte of encoded data item. + typeMask = 0xe0 + + // additionalInformationMask is used to extract additional information in initial byte of encoded data item. 
+ additionalInformationMask = 0x1f +) + +func getType(raw byte) cborType { + return cborType(raw & typeMask) +} + +func getAdditionalInformation(raw byte) byte { + return raw & additionalInformationMask +} + +func isBreakFlag(raw byte) bool { + return raw == cborBreakFlag +} + +func parseInitialByte(b byte) (t cborType, ai byte) { + return getType(b), getAdditionalInformation(b) +} + +const ( + tagNumRFC3339Time = 0 + tagNumEpochTime = 1 + tagNumUnsignedBignum = 2 + tagNumNegativeBignum = 3 + tagNumExpectedLaterEncodingBase64URL = 21 + tagNumExpectedLaterEncodingBase64 = 22 + tagNumExpectedLaterEncodingBase16 = 23 + tagNumSelfDescribedCBOR = 55799 +) + +const ( + cborBreakFlag = byte(0xff) + cborByteStringWithIndefiniteLengthHead = byte(0x5f) + cborTextStringWithIndefiniteLengthHead = byte(0x7f) + cborArrayWithIndefiniteLengthHead = byte(0x9f) + cborMapWithIndefiniteLengthHead = byte(0xbf) +) + +var ( + cborFalse = []byte{0xf4} + cborTrue = []byte{0xf5} + cborNil = []byte{0xf6} + cborNaN = []byte{0xf9, 0x7e, 0x00} + cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00} + cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00} +) + +// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types. +func validBuiltinTag(tagNum uint64, contentHead byte) error { + t := getType(contentHead) + switch tagNum { + case tagNumRFC3339Time: + // Tag content (date/time text string in RFC 3339 format) must be string type. + if t != cborTypeTextString { + return newInadmissibleTagContentTypeError( + tagNumRFC3339Time, + "text string", + t.String()) + } + return nil + + case tagNumEpochTime: + // Tag content (epoch date/time) must be uint, int, or float type. + if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) { + return newInadmissibleTagContentTypeError( + tagNumEpochTime, + "integer or floating-point number", + t.String()) + } + return nil + + case tagNumUnsignedBignum, tagNumNegativeBignum: + // Tag content (bignum) must be byte type. + if t != cborTypeByteString { + return newInadmissibleTagContentTypeErrorf( + fmt.Sprintf( + "tag number %d or %d must be followed by byte string, got %s", + tagNumUnsignedBignum, + tagNumNegativeBignum, + t.String(), + )) + } + return nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // From RFC 8949 3.4.5.2: + // The data item tagged can be a byte string or any other data item. In the latter + // case, the tag applies to all of the byte string data items contained in the data + // item, except for those contained in a nested data item tagged with an expected + // conversion. + return nil + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/decode.go b/vendor/github.com/fxamacker/cbor/v2/decode.go new file mode 100644 index 00000000..85842ac7 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/decode.go @@ -0,0 +1,3187 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/x448/float16" +) + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using default decoding options. If v is nil, not a pointer, or +// a nil pointer, Unmarshal returns an error. 
+// +// To unmarshal CBOR into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalCBOR method with a valid +// CBOR value. +// +// To unmarshal CBOR byte string into a value implementing the +// encoding.BinaryUnmarshaler interface, Unmarshal calls that value's +// UnmarshalBinary method with decoded CBOR byte string. +// +// To unmarshal CBOR into a pointer, Unmarshal sets the pointer to nil +// if CBOR data is null (0xf6) or undefined (0xf7). Otherwise, Unmarshal +// unmarshals CBOR into the value pointed to by the pointer. If the +// pointer is nil, Unmarshal creates a new value for it to point to. +// +// To unmarshal CBOR into an empty interface value, Unmarshal uses the +// following rules: +// +// CBOR booleans decode to bool. +// CBOR positive integers decode to uint64. +// CBOR negative integers decode to int64 (big.Int if value overflows). +// CBOR floating points decode to float64. +// CBOR byte strings decode to []byte. +// CBOR text strings decode to string. +// CBOR arrays decode to []interface{}. +// CBOR maps decode to map[interface{}]interface{}. +// CBOR null and undefined values decode to nil. +// CBOR times (tag 0 and 1) decode to time.Time. +// CBOR bignums (tag 2 and 3) decode to big.Int. +// CBOR tags with an unrecognized number decode to cbor.Tag +// +// To unmarshal a CBOR array into a slice, Unmarshal allocates a new slice +// if the CBOR array is empty or slice capacity is less than CBOR array length. +// Otherwise Unmarshal overwrites existing elements, and sets slice length +// to CBOR array length. +// +// To unmarshal a CBOR array into a Go array, Unmarshal decodes CBOR array +// elements into Go array elements. If the Go array is smaller than the +// CBOR array, the extra CBOR array elements are discarded. If the CBOR +// array is smaller than the Go array, the extra Go array elements are +// set to zero values. +// +// To unmarshal a CBOR array into a struct, struct must have a special field "_" +// with struct tag `cbor:",toarray"`. Go array elements are decoded into struct +// fields. Any "omitempty" struct field tag option is ignored in this case. +// +// To unmarshal a CBOR map into a map, Unmarshal allocates a new map only if the +// map is nil. Otherwise Unmarshal reuses the existing map and keeps existing +// entries. Unmarshal stores key-value pairs from the CBOR map into Go map. +// See DecOptions.DupMapKey to enable duplicate map key detection. +// +// To unmarshal a CBOR map into a struct, Unmarshal matches CBOR map keys to the +// keys in the following priority: +// +// 1. "cbor" key in struct field tag, +// 2. "json" key in struct field tag, +// 3. struct field name. +// +// Unmarshal tries an exact match for field name, then a case-insensitive match. +// Map key-value pairs without corresponding struct fields are ignored. See +// DecOptions.ExtraReturnErrors to return error at unknown field. +// +// To unmarshal a CBOR text string into a time.Time value, Unmarshal parses text +// string formatted in RFC3339. To unmarshal a CBOR integer/float into a +// time.Time value, Unmarshal creates an unix time with integer/float as seconds +// and fractional seconds since January 1, 1970 UTC. As a special case, Infinite +// and NaN float values decode to time.Time's zero value. +// +// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a +// slice/map/pointer, Unmarshal sets Go value to nil. 
Because null is often +// used to mean "not present", unmarshalling CBOR null and undefined value +// into any other Go type has no effect and returns no error. +// +// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time), +// and tag 2 and 3 (bignum). +// +// Unmarshal returns ExtraneousDataError error (without decoding into v) +// if there are any remaining bytes following the first valid CBOR data item. +// See UnmarshalFirst, if you want to unmarshal only the first +// CBOR data item without ExtraneousDataError caused by remaining bytes. +func Unmarshal(data []byte, v interface{}) error { + return defaultDecMode.Unmarshal(data, v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using default decoding options. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + return defaultDecMode.UnmarshalFirst(data, v) +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func Valid(data []byte) error { + return defaultDecMode.Valid(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with default restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func Wellformed(data []byte) error { + return defaultDecMode.Wellformed(data) +} + +// Unmarshaler is the interface implemented by types that wish to unmarshal +// CBOR data themselves. The input is a valid CBOR value. UnmarshalCBOR +// must copy the CBOR data if it needs to use it after returning. +type Unmarshaler interface { + UnmarshalCBOR([]byte) error +} + +// InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +type InvalidUnmarshalError struct { + s string +} + +func (e *InvalidUnmarshalError) Error() string { + return e.s +} + +// UnmarshalTypeError describes a CBOR value that can't be decoded to a Go type. +type UnmarshalTypeError struct { + CBORType string // type of CBOR value + GoType string // type of Go value it could not be decoded into + StructFieldName string // name of the struct field holding the Go value (optional) + errorMsg string // additional error message (optional) +} + +func (e *UnmarshalTypeError) Error() string { + var s string + if e.StructFieldName != "" { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go struct field " + e.StructFieldName + " of type " + e.GoType + } else { + s = "cbor: cannot unmarshal " + e.CBORType + " into Go value of type " + e.GoType + } + if e.errorMsg != "" { + s += " (" + e.errorMsg + ")" + } + return s +} + +// InvalidMapKeyTypeError describes invalid Go map key type when decoding CBOR map. +// For example, Go doesn't allow slice as map key. 
+type InvalidMapKeyTypeError struct { + GoType string +} + +func (e *InvalidMapKeyTypeError) Error() string { + return "cbor: invalid map key type: " + e.GoType +} + +// DupMapKeyError describes detected duplicate map key in CBOR map. +type DupMapKeyError struct { + Key interface{} + Index int +} + +func (e *DupMapKeyError) Error() string { + return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index) +} + +// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct. +type UnknownFieldError struct { + Index int +} + +func (e *UnknownFieldError) Error() string { + return fmt.Sprintf("cbor: found unknown field at map element index %d", e.Index) +} + +// UnacceptableDataItemError is returned when unmarshaling a CBOR input that contains a data item +// that is not acceptable to a specific CBOR-based application protocol ("invalid or unexpected" as +// described in RFC 8949 Section 5 Paragraph 3). +type UnacceptableDataItemError struct { + CBORType string + Message string +} + +func (e UnacceptableDataItemError) Error() string { + return fmt.Sprintf("cbor: data item of cbor type %s is not accepted by protocol: %s", e.CBORType, e.Message) +} + +// ByteStringExpectedFormatError is returned when unmarshaling CBOR byte string fails when +// using non-default ByteStringExpectedFormat decoding option that makes decoder expect +// a specified format such as base64, hex, etc. +type ByteStringExpectedFormatError struct { + expectedFormatOption ByteStringExpectedFormatMode + err error +} + +func newByteStringExpectedFormatError(expectedFormatOption ByteStringExpectedFormatMode, err error) *ByteStringExpectedFormatError { + return &ByteStringExpectedFormatError{expectedFormatOption, err} +} + +func (e *ByteStringExpectedFormatError) Error() string { + switch e.expectedFormatOption { + case ByteStringExpectedBase64URL: + return fmt.Sprintf("cbor: failed to decode base64url from byte string: %s", e.err) + + case ByteStringExpectedBase64: + return fmt.Sprintf("cbor: failed to decode base64 from byte string: %s", e.err) + + case ByteStringExpectedBase16: + return fmt.Sprintf("cbor: failed to decode hex from byte string: %s", e.err) + + default: + return fmt.Sprintf("cbor: failed to decode byte string in expected format %d: %s", e.expectedFormatOption, e.err) + } +} + +func (e *ByteStringExpectedFormatError) Unwrap() error { + return e.err +} + +// InadmissibleTagContentTypeError is returned when unmarshaling built-in CBOR tags +// fails because of inadmissible type for tag content. Currently, the built-in +// CBOR tags in this codec are tags 0-3 and 21-23. +// See "Tag validity" in RFC 8949 Section 5.3.2. 
+type InadmissibleTagContentTypeError struct { + s string + tagNum int + expectedTagContentType string + gotTagContentType string +} + +func newInadmissibleTagContentTypeError( + tagNum int, + expectedTagContentType string, + gotTagContentType string, +) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{ + tagNum: tagNum, + expectedTagContentType: expectedTagContentType, + gotTagContentType: gotTagContentType, + } +} + +func newInadmissibleTagContentTypeErrorf(s string) *InadmissibleTagContentTypeError { + return &InadmissibleTagContentTypeError{s: "cbor: " + s} //nolint:goconst // ignore "cbor" +} + +func (e *InadmissibleTagContentTypeError) Error() string { + if e.s == "" { + return fmt.Sprintf( + "cbor: tag number %d must be followed by %s, got %s", + e.tagNum, + e.expectedTagContentType, + e.gotTagContentType, + ) + } + return e.s +} + +// DupMapKeyMode specifies how to enforce duplicate map key. Two map keys are considered duplicates if: +// 1. When decoding into a struct, both keys match the same struct field. The keys are also +// considered duplicates if neither matches any field and decoding to interface{} would produce +// equal (==) values for both keys. +// 2. When decoding into a map, both keys are equal (==) when decoded into values of the +// destination map's key type. +type DupMapKeyMode int + +const ( + // DupMapKeyQuiet doesn't enforce duplicate map key. Decoder quietly (no error) + // uses faster of "keep first" or "keep last" depending on Go data type and other factors. + DupMapKeyQuiet DupMapKeyMode = iota + + // DupMapKeyEnforcedAPF enforces detection and rejection of duplicate map keys. + // APF means "Allow Partial Fill" and the destination map or struct can be partially filled. + // If a duplicate map key is detected, DupMapKeyError is returned without further decoding + // of the map. It's the caller's responsibility to respond to DupMapKeyError by + // discarding the partially filled result if their protocol requires it. + // WARNING: using DupMapKeyEnforcedAPF will decrease performance and increase memory use. + DupMapKeyEnforcedAPF + + maxDupMapKeyMode +) + +func (dmkm DupMapKeyMode) valid() bool { + return dmkm >= 0 && dmkm < maxDupMapKeyMode +} + +// IndefLengthMode specifies whether to allow indefinite length items. +type IndefLengthMode int + +const ( + // IndefLengthAllowed allows indefinite length items. + IndefLengthAllowed IndefLengthMode = iota + + // IndefLengthForbidden disallows indefinite length items. + IndefLengthForbidden + + maxIndefLengthMode +) + +func (m IndefLengthMode) valid() bool { + return m >= 0 && m < maxIndefLengthMode +} + +// TagsMode specifies whether to allow CBOR tags. +type TagsMode int + +const ( + // TagsAllowed allows CBOR tags. + TagsAllowed TagsMode = iota + + // TagsForbidden disallows CBOR tags. + TagsForbidden + + maxTagsMode +) + +func (tm TagsMode) valid() bool { + return tm >= 0 && tm < maxTagsMode +} + +// IntDecMode specifies which Go type (int64, uint64, or big.Int) should +// be used when decoding CBOR integers (major type 0 and 1) to Go interface{}. +type IntDecMode int + +const ( + // IntDecConvertNone affects how CBOR integers (major type 0 and 1) decode to Go interface{}. 
+ // It decodes CBOR unsigned integer (major type 0) to: + // - uint64 + // It decodes CBOR negative integer (major type 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertNone IntDecMode = iota + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value < math.MinInt64 + // - return UnmarshalTypeError if value > math.MaxInt64 + // Deprecated: IntDecConvertSigned should not be used. + // Please use other options, such as IntDecConvertSignedOrError, IntDecConvertSignedOrBigInt, IntDecConvertNone. + IntDecConvertSigned + + // IntDecConvertSignedOrFail affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It decodes CBOR integers (major type 0 and 1) to: + // - int64 if value fits + // - return UnmarshalTypeError if value doesn't fit into int64 + IntDecConvertSignedOrFail + + // IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}. + // It makes CBOR integers (major type 0 and 1) decode to: + // - int64 if value fits + // - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64 + IntDecConvertSignedOrBigInt + + maxIntDec +) + +func (idm IntDecMode) valid() bool { + return idm >= 0 && idm < maxIntDec +} + +// MapKeyByteStringMode specifies how to decode CBOR byte string (major type 2) +// as Go map key when decoding CBOR map key into an empty Go interface value. +// Specifically, this option applies when decoding CBOR map into +// - Go empty interface, or +// - Go map with empty interface as key type. +// The CBOR map key types handled by this option are +// - byte string +// - tagged byte string +// - nested tagged byte string +type MapKeyByteStringMode int + +const ( + // MapKeyByteStringAllowed allows CBOR byte string to be decoded as Go map key. + // Since Go doesn't allow []byte as map key, CBOR byte string is decoded to + // ByteString which has underlying string type. + // This is the default setting. + MapKeyByteStringAllowed MapKeyByteStringMode = iota + + // MapKeyByteStringForbidden forbids CBOR byte string being decoded as Go map key. + // Attempting to decode CBOR byte string as map key into empty interface value + // returns a decoding error. + MapKeyByteStringForbidden + + maxMapKeyByteStringMode +) + +func (mkbsm MapKeyByteStringMode) valid() bool { + return mkbsm >= 0 && mkbsm < maxMapKeyByteStringMode +} + +// ExtraDecErrorCond specifies extra conditions that should be treated as errors. +type ExtraDecErrorCond uint + +// ExtraDecErrorNone indicates no extra error condition. +const ExtraDecErrorNone ExtraDecErrorCond = 0 + +const ( + // ExtraDecErrorUnknownField indicates error condition when destination + // Go struct doesn't have a field matching a CBOR map key. + ExtraDecErrorUnknownField ExtraDecErrorCond = 1 << iota + + maxExtraDecError +) + +func (ec ExtraDecErrorCond) valid() bool { + return ec < maxExtraDecError +} + +// UTF8Mode option specifies if decoder should +// decode CBOR Text containing invalid UTF-8 string. +type UTF8Mode int + +const ( + // UTF8RejectInvalid rejects CBOR Text containing + // invalid UTF-8 string. + UTF8RejectInvalid UTF8Mode = iota + + // UTF8DecodeInvalid allows decoding CBOR Text containing + // invalid UTF-8 string. 
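To illustrate the IntDec options above, the following sketch (an editorial example, not part of decode.go) decodes the same unsigned integer into an empty interface under two modes; the bytes 0x18 0x2a encode the value 42:

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    plain, _ := cbor.DecOptions{}.DecMode() // IntDecConvertNone is the zero value
    signed, _ := cbor.DecOptions{IntDec: cbor.IntDecConvertSignedOrFail}.DecMode()

    data := []byte{0x18, 0x2a} // CBOR unsigned integer 42 (major type 0)

    var a, b interface{}
    _ = plain.Unmarshal(data, &a)
    _ = signed.Unmarshal(data, &b)
    fmt.Printf("%T %T\n", a, b) // expected: uint64 int64, per the option docs above
}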
+ UTF8DecodeInvalid + + maxUTF8Mode +) + +func (um UTF8Mode) valid() bool { + return um >= 0 && um < maxUTF8Mode +} + +// FieldNameMatchingMode specifies how string keys in CBOR maps are matched to Go struct field names. +type FieldNameMatchingMode int + +const ( + // FieldNameMatchingPreferCaseSensitive prefers to decode map items into struct fields whose names (or tag + // names) exactly match the item's key. If there is no such field, a map item will be decoded into a field whose + // name is a case-insensitive match for the item's key. + FieldNameMatchingPreferCaseSensitive FieldNameMatchingMode = iota + + // FieldNameMatchingCaseSensitive decodes map items only into a struct field whose name (or tag name) is an + // exact match for the item's key. + FieldNameMatchingCaseSensitive + + maxFieldNameMatchingMode +) + +func (fnmm FieldNameMatchingMode) valid() bool { + return fnmm >= 0 && fnmm < maxFieldNameMatchingMode +} + +// BigIntDecMode specifies how to decode CBOR bignum to Go interface{}. +type BigIntDecMode int + +const ( + // BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int) + // when unmarshalling into a Go interface{}. + BigIntDecodeValue BigIntDecMode = iota + + // BigIntDecodePointer makes CBOR bignum decode to *big.Int when + // unmarshalling into a Go interface{}. + BigIntDecodePointer + + maxBigIntDecMode +) + +func (bidm BigIntDecMode) valid() bool { + return bidm >= 0 && bidm < maxBigIntDecMode +} + +// ByteStringToStringMode specifies the behavior when decoding a CBOR byte string into a Go string. +type ByteStringToStringMode int + +const ( + // ByteStringToStringForbidden generates an error on an attempt to decode a CBOR byte string into a Go string. + ByteStringToStringForbidden ByteStringToStringMode = iota + + // ByteStringToStringAllowed permits decoding a CBOR byte string into a Go string. + ByteStringToStringAllowed + + // ByteStringToStringAllowedWithExpectedLaterEncoding permits decoding a CBOR byte string + // into a Go string. Also, if the byte string is enclosed (directly or indirectly) by one of + // the "expected later encoding" tags (numbers 21 through 23), the destination string will + // be populated by applying the designated text encoding to the contents of the input byte + // string. + ByteStringToStringAllowedWithExpectedLaterEncoding + + maxByteStringToStringMode +) + +func (bstsm ByteStringToStringMode) valid() bool { + return bstsm >= 0 && bstsm < maxByteStringToStringMode +} + +// FieldNameByteStringMode specifies the behavior when decoding a CBOR byte string map key as a Go struct field name. +type FieldNameByteStringMode int + +const ( + // FieldNameByteStringForbidden generates an error on an attempt to decode a CBOR byte string map key as a Go struct field name. + FieldNameByteStringForbidden FieldNameByteStringMode = iota + + // FieldNameByteStringAllowed permits CBOR byte string map keys to be recognized as Go struct field names. + FieldNameByteStringAllowed + + maxFieldNameByteStringMode +) + +func (fnbsm FieldNameByteStringMode) valid() bool { + return fnbsm >= 0 && fnbsm < maxFieldNameByteStringMode +} + +// UnrecognizedTagToAnyMode specifies how to decode unrecognized CBOR tag into an empty interface (any). +// Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. +type UnrecognizedTagToAnyMode int + +const ( + // UnrecognizedTagNumAndContentToAny decodes CBOR tag number and tag content to cbor.Tag + // when decoding unrecognized CBOR tag into an empty interface. 
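A short, hedged example of the ByteStringToString switch above; the expected results follow from the option documentation rather than from a verified run:

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    data := []byte{0x45, 'h', 'e', 'l', 'l', 'o'} // CBOR byte string h'68656c6c6f'

    var s string
    strict, _ := cbor.DecOptions{}.DecMode() // ByteStringToStringForbidden is the default
    fmt.Println(strict.Unmarshal(data, &s))  // expected: a non-nil error

    relaxed, _ := cbor.DecOptions{ByteStringToString: cbor.ByteStringToStringAllowed}.DecMode()
    fmt.Println(relaxed.Unmarshal(data, &s), s) // expected: <nil> hello
}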
+ UnrecognizedTagNumAndContentToAny UnrecognizedTagToAnyMode = iota + + // UnrecognizedTagContentToAny decodes only CBOR tag content (into its default type) + // when decoding unrecognized CBOR tag into an empty interface. + UnrecognizedTagContentToAny + + maxUnrecognizedTagToAny +) + +func (uttam UnrecognizedTagToAnyMode) valid() bool { + return uttam >= 0 && uttam < maxUnrecognizedTagToAny +} + +// TimeTagToAnyMode specifies how to decode CBOR tag 0 and 1 into an empty interface (any). +// Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. +type TimeTagToAnyMode int + +const ( + // TimeTagToTime decodes CBOR tag 0 and 1 into a time.Time value + // when decoding tag 0 or 1 into an empty interface. + TimeTagToTime TimeTagToAnyMode = iota + + // TimeTagToRFC3339 decodes CBOR tag 0 and 1 into a time string in RFC3339 format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339 + + // TimeTagToRFC3339Nano decodes CBOR tag 0 and 1 into a time string in RFC3339Nano format + // when decoding tag 0 or 1 into an empty interface. + TimeTagToRFC3339Nano + + maxTimeTagToAnyMode +) + +func (tttam TimeTagToAnyMode) valid() bool { + return tttam >= 0 && tttam < maxTimeTagToAnyMode +} + +// SimpleValueRegistry is a registry of unmarshaling behaviors for each possible CBOR simple value +// number (0...23 and 32...255). +type SimpleValueRegistry struct { + rejected [256]bool +} + +// WithRejectedSimpleValue registers the given simple value as rejected. If the simple value is +// encountered in a CBOR input during unmarshaling, an UnacceptableDataItemError is returned. +func WithRejectedSimpleValue(sv SimpleValue) func(*SimpleValueRegistry) error { + return func(r *SimpleValueRegistry) error { + if sv >= 24 && sv <= 31 { + return fmt.Errorf("cbor: cannot set analog for reserved simple value %d", sv) + } + r.rejected[sv] = true + return nil + } +} + +// Creates a new SimpleValueRegistry. The registry state is initialized by executing the provided +// functions in order against a registry that is pre-populated with the defaults for all well-formed +// simple value numbers. +func NewSimpleValueRegistryFromDefaults(fns ...func(*SimpleValueRegistry) error) (*SimpleValueRegistry, error) { + var r SimpleValueRegistry + for _, fn := range fns { + if err := fn(&r); err != nil { + return nil, err + } + } + return &r, nil +} + +// NaNMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing NaN (not-a-number). +type NaNMode int + +const ( + // NaNDecodeAllowed will decode NaN values to Go float32 or float64. + NaNDecodeAllowed NaNMode = iota + + // NaNDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode a NaN value. + NaNDecodeForbidden + + maxNaNDecode +) + +func (ndm NaNMode) valid() bool { + return ndm >= 0 && ndm < maxNaNDecode +} + +// InfMode specifies how to decode floating-point values (major type 7, additional information 25 +// through 27) representing positive or negative infinity. +type InfMode int + +const ( + // InfDecodeAllowed will decode infinite values to Go float32 or float64. + InfDecodeAllowed InfMode = iota + + // InfDecodeForbidden will return an UnacceptableDataItemError on an attempt to decode an + // infinite value. 
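The registry builder above composes like this; a sketch assuming the package-level SimpleValue type referenced in the comments (0xf0 is the encoding of simple value 16):

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    reg, err := cbor.NewSimpleValueRegistryFromDefaults(
        cbor.WithRejectedSimpleValue(cbor.SimpleValue(16)),
    )
    if err != nil {
        panic(err)
    }
    dm, _ := cbor.DecOptions{SimpleValues: reg}.DecMode()

    var v interface{}
    err = dm.Unmarshal([]byte{0xf0}, &v) // 0xf0 is CBOR simple value 16
    fmt.Println(err)                     // expected: an UnacceptableDataItemError
}

Attempting to register one of the reserved simple values 24 through 31 makes NewSimpleValueRegistryFromDefaults return an error instead.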
+ InfDecodeForbidden + + maxInfDecode +) + +func (idm InfMode) valid() bool { + return idm >= 0 && idm < maxInfDecode +} + +// ByteStringToTimeMode specifies the behavior when decoding a CBOR byte string into a Go time.Time. +type ByteStringToTimeMode int + +const ( + // ByteStringToTimeForbidden generates an error on an attempt to decode a CBOR byte string into a Go time.Time. + ByteStringToTimeForbidden ByteStringToTimeMode = iota + + // ByteStringToTimeAllowed permits decoding a CBOR byte string into a Go time.Time. + ByteStringToTimeAllowed + + maxByteStringToTimeMode +) + +func (bttm ByteStringToTimeMode) valid() bool { + return bttm >= 0 && bttm < maxByteStringToTimeMode +} + +// ByteStringExpectedFormatMode specifies how to decode CBOR byte string into Go byte slice +// when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if +// the CBOR byte string does not contain the expected format (e.g. base64) specified. +// For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" +// in RFC 8949 Section 3.4.5.2. +type ByteStringExpectedFormatMode int + +const ( + // ByteStringExpectedFormatNone copies the unmodified CBOR byte string into Go byte slice + // if the byte string is not tagged by CBOR tag 21-23. + ByteStringExpectedFormatNone ByteStringExpectedFormatMode = iota + + // ByteStringExpectedBase64URL expects CBOR byte strings to contain base64url-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64url-encoded bytes into Go slice. + ByteStringExpectedBase64URL + + // ByteStringExpectedBase64 expects CBOR byte strings to contain base64-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base64-encoded bytes into Go slice. + ByteStringExpectedBase64 + + // ByteStringExpectedBase16 expects CBOR byte strings to contain base16-encoded bytes + // if the byte string is not tagged by CBOR tag 21-23. The decoder will attempt to decode + // the base16-encoded bytes into Go slice. + ByteStringExpectedBase16 + + maxByteStringExpectedFormatMode +) + +func (bsefm ByteStringExpectedFormatMode) valid() bool { + return bsefm >= 0 && bsefm < maxByteStringExpectedFormatMode +} + +// BignumTagMode specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can be +// decoded. +type BignumTagMode int + +const ( + // BignumTagAllowed allows bignum tags to be decoded. + BignumTagAllowed BignumTagMode = iota + + // BignumTagForbidden produces an UnacceptableDataItemError during Unmarshal if a bignum tag + // is encountered in the input. + BignumTagForbidden + + maxBignumTag +) + +func (btm BignumTagMode) valid() bool { + return btm >= 0 && btm < maxBignumTag +} + +// BinaryUnmarshalerMode specifies how to decode into types that implement +// encoding.BinaryUnmarshaler. +type BinaryUnmarshalerMode int + +const ( + // BinaryUnmarshalerByteString will invoke UnmarshalBinary on the contents of a CBOR byte + // string when decoding into a value that implements BinaryUnmarshaler. + BinaryUnmarshalerByteString BinaryUnmarshalerMode = iota + + // BinaryUnmarshalerNone does not recognize BinaryUnmarshaler implementations during decode. + BinaryUnmarshalerNone + + maxBinaryUnmarshalerMode +) + +func (bum BinaryUnmarshalerMode) valid() bool { + return bum >= 0 && bum < maxBinaryUnmarshalerMode +} + +// DecOptions specifies decoding options. +type DecOptions struct { + // DupMapKey specifies whether to enforce duplicate map key. 
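For ByteStringExpectedFormat, a small illustrative program (not part of the vendored source); if the payload were not valid base64url, the decoder would be expected to surface a ByteStringExpectedFormatError instead:

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    dm, _ := cbor.DecOptions{ByteStringExpectedFormat: cbor.ByteStringExpectedBase64URL}.DecMode()

    var raw []byte
    // 0x44 = byte string of length 4; its content is the ASCII text "AQID",
    // which is base64url for the bytes 0x01 0x02 0x03.
    err := dm.Unmarshal([]byte{0x44, 'A', 'Q', 'I', 'D'}, &raw)
    fmt.Println(raw, err) // expected: [1 2 3] <nil>
}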
+ DupMapKey DupMapKeyMode + + // TimeTag specifies whether or not untagged data items, or tags other + // than tag 0 and tag 1, can be decoded to time.Time. If tag 0 or tag 1 + // appears in an input, the type of its content is always validated as + // specified in RFC 8949. That behavior is not controlled by this + // option. The behavior of the supported modes are: + // + // DecTagIgnored (default): Untagged text strings and text strings + // enclosed in tags other than 0 and 1 are decoded as though enclosed + // in tag 0. Untagged unsigned integers, negative integers, and + // floating-point numbers (or those enclosed in tags other than 0 and + // 1) are decoded as though enclosed in tag 1. Decoding a tag other + // than 0 or 1 enclosing simple values null or undefined into a + // time.Time does not modify the destination value. + // + // DecTagOptional: Untagged text strings are decoded as though + // enclosed in tag 0. Untagged unsigned integers, negative integers, + // and floating-point numbers are decoded as though enclosed in tag + // 1. Tags other than 0 and 1 will produce an error on attempts to + // decode them into a time.Time. + // + // DecTagRequired: Only tags 0 and 1 can be decoded to time.Time. Any + // other input will produce an error. + TimeTag DecTagMode + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // IntDec specifies which Go integer type (int64 or uint64) to use + // when decoding CBOR int (major type 0 and 1) to Go interface{}. + IntDec IntDecMode + + // MapKeyByteString specifies how to decode CBOR byte string as map key + // when decoding CBOR map with byte string key into an empty interface value. + // By default, an error is returned when attempting to decode CBOR byte string + // as map key because Go doesn't allow []byte as map key. + MapKeyByteString MapKeyByteStringMode + + // ExtraReturnErrors specifies extra conditions that should be treated as errors. + ExtraReturnErrors ExtraDecErrorCond + + // DefaultMapType specifies Go map type to create and decode to + // when unmarshalling CBOR into an empty interface value. + // By default, unmarshal uses map[interface{}]interface{}. + DefaultMapType reflect.Type + + // UTF8 specifies if decoder should decode CBOR Text containing invalid UTF-8. + // By default, unmarshal rejects CBOR text containing invalid UTF-8. + UTF8 UTF8Mode + + // FieldNameMatching specifies how string keys in CBOR maps are matched to Go struct field names. + FieldNameMatching FieldNameMatchingMode + + // BigIntDec specifies how to decode CBOR bignum to Go interface{}. + BigIntDec BigIntDecMode + + // DefaultByteStringType is the Go type that should be produced when decoding a CBOR byte + // string into an empty interface value. 
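As a sketch of the DefaultMapType option described above (editorial example; the printed type follows from the option's documentation, which says map[interface{}]interface{} is used when the option is nil):

package main

import (
    "fmt"
    "reflect"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    dm, err := cbor.DecOptions{
        DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)),
    }.DecMode()
    if err != nil {
        panic(err)
    }

    var v interface{}
    // 0xa1 0x61 0x61 0x01 is the CBOR map {"a": 1}
    _ = dm.Unmarshal([]byte{0xa1, 0x61, 0x61, 0x01}, &v)
    fmt.Printf("%T\n", v) // expected: map[string]interface {}
}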
Types to which a []byte is convertible are valid + // for this option, except for array and pointer-to-array types. If nil, the default is + // []byte. + DefaultByteStringType reflect.Type + + // ByteStringToString specifies the behavior when decoding a CBOR byte string into a Go string. + ByteStringToString ByteStringToStringMode + + // FieldNameByteString specifies the behavior when decoding a CBOR byte string map key as a + // Go struct field name. + FieldNameByteString FieldNameByteStringMode + + // UnrecognizedTagToAny specifies how to decode unrecognized CBOR tag into an empty interface. + // Currently, recognized CBOR tag numbers are 0, 1, 2, 3, or registered by TagSet. + UnrecognizedTagToAny UnrecognizedTagToAnyMode + + // TimeTagToAny specifies how to decode CBOR tag 0 and 1 into an empty interface (any). + // Based on the specified mode, Unmarshal can return a time.Time value or a time string in a specific format. + TimeTagToAny TimeTagToAnyMode + + // SimpleValues is an immutable mapping from each CBOR simple value to a corresponding + // unmarshal behavior. If nil, the simple values false, true, null, and undefined are mapped + // to the Go analog values false, true, nil, and nil, respectively, and all other simple + // values N (except the reserved simple values 24 through 31) are mapped to + // cbor.SimpleValue(N). In other words, all well-formed simple values can be decoded. + // + // Users may provide a custom SimpleValueRegistry constructed via + // NewSimpleValueRegistryFromDefaults. + SimpleValues *SimpleValueRegistry + + // NaN specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing NaN (not-a-number). + NaN NaNMode + + // Inf specifies how to decode floating-point values (major type 7, additional information + // 25 through 27) representing positive or negative infinity. + Inf InfMode + + // ByteStringToTime specifies how to decode CBOR byte string into Go time.Time. + ByteStringToTime ByteStringToTimeMode + + // ByteStringExpectedFormat specifies how to decode CBOR byte string into Go byte slice + // when the byte string is NOT enclosed in CBOR tag 21, 22, or 23. An error is returned if + // the CBOR byte string does not contain the expected format (e.g. base64) specified. + // For tags 21-23, see "Expected Later Encoding for CBOR-to-JSON Converters" + // in RFC 8949 Section 3.4.5.2. + ByteStringExpectedFormat ByteStringExpectedFormatMode + + // BignumTag specifies whether or not the "bignum" tags 2 and 3 (RFC 8949 Section 3.4.3) can + // be decoded. Unlike BigIntDec, this option applies to all bignum tags encountered in a + // CBOR input, independent of the type of the destination value of a particular Unmarshal + // operation. + BignumTag BignumTagMode + + // BinaryUnmarshaler specifies how to decode into types that implement + // encoding.BinaryUnmarshaler. + BinaryUnmarshaler BinaryUnmarshalerMode +} + +// DecMode returns DecMode with immutable options and no tags (safe for concurrency). +func (opts DecOptions) DecMode() (DecMode, error) { //nolint:gocritic // ignore hugeParam + return opts.decMode() +} + +// validForTags checks that the provided tag set is compatible with these options and returns a +// non-nil error if and only if the provided tag set is incompatible. 
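Putting several of the options above together, a hedged configuration sketch; DecMode validates option values (for example MaxNestedLevels must lie in [4, 65535]) and the returned mode is immutable and safe for concurrent use:

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    // Out-of-range option values are rejected when the mode is built.
    if _, err := (cbor.DecOptions{MaxNestedLevels: 2}).DecMode(); err != nil {
        fmt.Println(err) // expected to mention the [4, 65535] range
    }

    dm, err := cbor.DecOptions{
        DupMapKey:        cbor.DupMapKeyEnforcedAPF,
        IndefLength:      cbor.IndefLengthForbidden,
        TagsMd:           cbor.TagsForbidden,
        MaxNestedLevels:  16,
        MaxArrayElements: 65536,
        MaxMapPairs:      65536,
    }.DecMode()
    if err != nil {
        panic(err)
    }
    _ = dm // dm can be shared across goroutines; the options are copied, not referenced
}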
+func (opts DecOptions) validForTags(tags TagSet) error { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return errors.New("cbor: cannot create DecMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return errors.New("cbor: cannot create DecMode with nil value as TagSet") + } + if opts.ByteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + opts.ByteStringExpectedFormat != ByteStringExpectedFormatNone { + for _, tagNum := range []uint64{ + tagNumExpectedLaterEncodingBase64URL, + tagNumExpectedLaterEncodingBase64, + tagNumExpectedLaterEncodingBase16, + } { + if rt := tags.getTypeFromTagNum([]uint64{tagNum}); rt != nil { + return fmt.Errorf("cbor: DecMode with non-default StringExpectedEncoding or ByteSliceExpectedEncoding treats tag %d as built-in and conflicts with the provided TagSet's registration of %v", tagNum, rt) + } + } + + } + return nil +} + +// DecModeWithTags returns DecMode with options and tags that are both immutable (safe for concurrency). +func (opts DecOptions) DecModeWithTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.DecTag != DecTagIgnored { + ts[contentType] = tag + } + } + syncTags.RUnlock() + + if len(ts) > 0 { + dm.tags = ts + } + + return dm, nil +} + +// DecModeWithSharedTags returns DecMode with immutable options and mutable shared tags (safe for concurrency). +func (opts DecOptions) DecModeWithSharedTags(tags TagSet) (DecMode, error) { //nolint:gocritic // ignore hugeParam + if err := opts.validForTags(tags); err != nil { + return nil, err + } + dm, err := opts.decMode() + if err != nil { + return nil, err + } + dm.tags = tags + return dm, nil +} + +const ( + defaultMaxArrayElements = 131072 + minMaxArrayElements = 16 + maxMaxArrayElements = 2147483647 + + defaultMaxMapPairs = 131072 + minMaxMapPairs = 16 + maxMaxMapPairs = 2147483647 + + defaultMaxNestedLevels = 32 + minMaxNestedLevels = 4 + maxMaxNestedLevels = 65535 +) + +var defaultSimpleValues = func() *SimpleValueRegistry { + registry, err := NewSimpleValueRegistryFromDefaults() + if err != nil { + panic(err) + } + return registry +}() + +//nolint:gocyclo // Each option comes with some manageable boilerplate +func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.DupMapKey.valid() { + return nil, errors.New("cbor: invalid DupMapKey " + strconv.Itoa(int(opts.DupMapKey))) + } + + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + + if !opts.IntDec.valid() { + return nil, errors.New("cbor: invalid IntDec " + strconv.Itoa(int(opts.IntDec))) + } + + if !opts.MapKeyByteString.valid() { + return nil, errors.New("cbor: invalid MapKeyByteString " + strconv.Itoa(int(opts.MapKeyByteString))) + } + + if opts.MaxNestedLevels == 0 { + opts.MaxNestedLevels = defaultMaxNestedLevels + } else if opts.MaxNestedLevels < minMaxNestedLevels || opts.MaxNestedLevels 
> maxMaxNestedLevels { + return nil, errors.New("cbor: invalid MaxNestedLevels " + strconv.Itoa(opts.MaxNestedLevels) + + " (range is [" + strconv.Itoa(minMaxNestedLevels) + ", " + strconv.Itoa(maxMaxNestedLevels) + "])") + } + + if opts.MaxArrayElements == 0 { + opts.MaxArrayElements = defaultMaxArrayElements + } else if opts.MaxArrayElements < minMaxArrayElements || opts.MaxArrayElements > maxMaxArrayElements { + return nil, errors.New("cbor: invalid MaxArrayElements " + strconv.Itoa(opts.MaxArrayElements) + + " (range is [" + strconv.Itoa(minMaxArrayElements) + ", " + strconv.Itoa(maxMaxArrayElements) + "])") + } + + if opts.MaxMapPairs == 0 { + opts.MaxMapPairs = defaultMaxMapPairs + } else if opts.MaxMapPairs < minMaxMapPairs || opts.MaxMapPairs > maxMaxMapPairs { + return nil, errors.New("cbor: invalid MaxMapPairs " + strconv.Itoa(opts.MaxMapPairs) + + " (range is [" + strconv.Itoa(minMaxMapPairs) + ", " + strconv.Itoa(maxMaxMapPairs) + "])") + } + + if !opts.ExtraReturnErrors.valid() { + return nil, errors.New("cbor: invalid ExtraReturnErrors " + strconv.Itoa(int(opts.ExtraReturnErrors))) + } + + if opts.DefaultMapType != nil && opts.DefaultMapType.Kind() != reflect.Map { + return nil, fmt.Errorf("cbor: invalid DefaultMapType %s", opts.DefaultMapType) + } + + if !opts.UTF8.valid() { + return nil, errors.New("cbor: invalid UTF8 " + strconv.Itoa(int(opts.UTF8))) + } + + if !opts.FieldNameMatching.valid() { + return nil, errors.New("cbor: invalid FieldNameMatching " + strconv.Itoa(int(opts.FieldNameMatching))) + } + + if !opts.BigIntDec.valid() { + return nil, errors.New("cbor: invalid BigIntDec " + strconv.Itoa(int(opts.BigIntDec))) + } + + if opts.DefaultByteStringType != nil && + opts.DefaultByteStringType.Kind() != reflect.String && + (opts.DefaultByteStringType.Kind() != reflect.Slice || opts.DefaultByteStringType.Elem().Kind() != reflect.Uint8) { + return nil, fmt.Errorf("cbor: invalid DefaultByteStringType: %s is not of kind string or []uint8", opts.DefaultByteStringType) + } + + if !opts.ByteStringToString.valid() { + return nil, errors.New("cbor: invalid ByteStringToString " + strconv.Itoa(int(opts.ByteStringToString))) + } + + if !opts.FieldNameByteString.valid() { + return nil, errors.New("cbor: invalid FieldNameByteString " + strconv.Itoa(int(opts.FieldNameByteString))) + } + + if !opts.UnrecognizedTagToAny.valid() { + return nil, errors.New("cbor: invalid UnrecognizedTagToAnyMode " + strconv.Itoa(int(opts.UnrecognizedTagToAny))) + } + simpleValues := opts.SimpleValues + if simpleValues == nil { + simpleValues = defaultSimpleValues + } + + if !opts.TimeTagToAny.valid() { + return nil, errors.New("cbor: invalid TimeTagToAny " + strconv.Itoa(int(opts.TimeTagToAny))) + } + + if !opts.NaN.valid() { + return nil, errors.New("cbor: invalid NaNDec " + strconv.Itoa(int(opts.NaN))) + } + + if !opts.Inf.valid() { + return nil, errors.New("cbor: invalid InfDec " + strconv.Itoa(int(opts.Inf))) + } + + if !opts.ByteStringToTime.valid() { + return nil, errors.New("cbor: invalid ByteStringToTime " + strconv.Itoa(int(opts.ByteStringToTime))) + } + + if !opts.ByteStringExpectedFormat.valid() { + return nil, errors.New("cbor: invalid ByteStringExpectedFormat " + strconv.Itoa(int(opts.ByteStringExpectedFormat))) + } + + if !opts.BignumTag.valid() { + return nil, errors.New("cbor: invalid BignumTag " + strconv.Itoa(int(opts.BignumTag))) + } + + if !opts.BinaryUnmarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler))) + } + + 
dm := decMode{ + dupMapKey: opts.DupMapKey, + timeTag: opts.TimeTag, + maxNestedLevels: opts.MaxNestedLevels, + maxArrayElements: opts.MaxArrayElements, + maxMapPairs: opts.MaxMapPairs, + indefLength: opts.IndefLength, + tagsMd: opts.TagsMd, + intDec: opts.IntDec, + mapKeyByteString: opts.MapKeyByteString, + extraReturnErrors: opts.ExtraReturnErrors, + defaultMapType: opts.DefaultMapType, + utf8: opts.UTF8, + fieldNameMatching: opts.FieldNameMatching, + bigIntDec: opts.BigIntDec, + defaultByteStringType: opts.DefaultByteStringType, + byteStringToString: opts.ByteStringToString, + fieldNameByteString: opts.FieldNameByteString, + unrecognizedTagToAny: opts.UnrecognizedTagToAny, + timeTagToAny: opts.TimeTagToAny, + simpleValues: simpleValues, + nanDec: opts.NaN, + infDec: opts.Inf, + byteStringToTime: opts.ByteStringToTime, + byteStringExpectedFormat: opts.ByteStringExpectedFormat, + bignumTag: opts.BignumTag, + binaryUnmarshaler: opts.BinaryUnmarshaler, + } + + return &dm, nil +} + +// DecMode is the main interface for CBOR decoding. +type DecMode interface { + // Unmarshal parses the CBOR-encoded data into the value pointed to by v + // using the decoding mode. If v is nil, not a pointer, or a nil pointer, + // Unmarshal returns an error. + // + // See the documentation for Unmarshal for details. + Unmarshal(data []byte, v interface{}) error + + // UnmarshalFirst parses the first CBOR data item into the value pointed to by v + // using the decoding mode. Any remaining bytes are returned in rest. + // + // If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. + // + // See the documentation for Unmarshal for details. + UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) + + // Valid checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + // + // WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) + // and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". + // + // Deprecated: Valid is kept for compatibility and should not be used. + // Use Wellformed instead because it has a more appropriate name. + Valid(data []byte) error + + // Wellformed checks whether data is a well-formed encoded CBOR data item and + // that it complies with configurable restrictions such as MaxNestedLevels, + // MaxArrayElements, MaxMapPairs, etc. + // + // If there are any remaining bytes after the CBOR data item, + // an ExtraneousDataError is returned. + Wellformed(data []byte) error + + // NewDecoder returns a new decoder that reads from r using dm DecMode. + NewDecoder(r io.Reader) *Decoder + + // DecOptions returns user specified options used to create this DecMode. 
+ DecOptions() DecOptions +} + +type decMode struct { + tags tagProvider + dupMapKey DupMapKeyMode + timeTag DecTagMode + maxNestedLevels int + maxArrayElements int + maxMapPairs int + indefLength IndefLengthMode + tagsMd TagsMode + intDec IntDecMode + mapKeyByteString MapKeyByteStringMode + extraReturnErrors ExtraDecErrorCond + defaultMapType reflect.Type + utf8 UTF8Mode + fieldNameMatching FieldNameMatchingMode + bigIntDec BigIntDecMode + defaultByteStringType reflect.Type + byteStringToString ByteStringToStringMode + fieldNameByteString FieldNameByteStringMode + unrecognizedTagToAny UnrecognizedTagToAnyMode + timeTagToAny TimeTagToAnyMode + simpleValues *SimpleValueRegistry + nanDec NaNMode + infDec InfMode + byteStringToTime ByteStringToTimeMode + byteStringExpectedFormat ByteStringExpectedFormatMode + bignumTag BignumTagMode + binaryUnmarshaler BinaryUnmarshalerMode +} + +var defaultDecMode, _ = DecOptions{}.decMode() + +// DecOptions returns user specified options used to create this DecMode. +func (dm *decMode) DecOptions() DecOptions { + simpleValues := dm.simpleValues + if simpleValues == defaultSimpleValues { + // Users can't explicitly set this to defaultSimpleValues. It must have been nil in + // the original DecOptions. + simpleValues = nil + } + + return DecOptions{ + DupMapKey: dm.dupMapKey, + TimeTag: dm.timeTag, + MaxNestedLevels: dm.maxNestedLevels, + MaxArrayElements: dm.maxArrayElements, + MaxMapPairs: dm.maxMapPairs, + IndefLength: dm.indefLength, + TagsMd: dm.tagsMd, + IntDec: dm.intDec, + MapKeyByteString: dm.mapKeyByteString, + ExtraReturnErrors: dm.extraReturnErrors, + DefaultMapType: dm.defaultMapType, + UTF8: dm.utf8, + FieldNameMatching: dm.fieldNameMatching, + BigIntDec: dm.bigIntDec, + DefaultByteStringType: dm.defaultByteStringType, + ByteStringToString: dm.byteStringToString, + FieldNameByteString: dm.fieldNameByteString, + UnrecognizedTagToAny: dm.unrecognizedTagToAny, + TimeTagToAny: dm.timeTagToAny, + SimpleValues: simpleValues, + NaN: dm.nanDec, + Inf: dm.infDec, + ByteStringToTime: dm.byteStringToTime, + ByteStringExpectedFormat: dm.byteStringExpectedFormat, + BignumTag: dm.bignumTag, + BinaryUnmarshaler: dm.binaryUnmarshaler, + } +} + +// Unmarshal parses the CBOR-encoded data into the value pointed to by v +// using dm decoding mode. If v is nil, not a pointer, or a nil pointer, +// Unmarshal returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) Unmarshal(data []byte, v interface{}) error { + d := decoder{data: data, dm: dm} + + // Check well-formedness. + off := d.off // Save offset before data validation + err := d.wellformed(false, false) // don't allow any extra data after valid data item. + d.off = off // Restore offset + if err != nil { + return err + } + + return d.value(v) +} + +// UnmarshalFirst parses the first CBOR data item into the value pointed to by v +// using dm decoding mode. Any remaining bytes are returned in rest. +// +// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error. +// +// See the documentation for Unmarshal for details. +func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) { + d := decoder{data: data, dm: dm} + + // check well-formedness. + off := d.off // Save offset before data validation + err = d.wellformed(true, false) // allow extra data after well-formed data item + d.off = off // Restore offset + + // If it is well-formed, parse the value. 
This is structured like this to allow + // better test coverage + if err == nil { + err = d.value(v) + } + + // If either wellformed or value returned an error, do not return rest bytes + if err != nil { + return nil, err + } + + // Return the rest of the data slice (which might be len 0) + return d.data[d.off:], nil +} + +// Valid checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +// +// WARNING: Valid doesn't check if encoded CBOR data item is valid (i.e. validity) +// and RFC 8949 distinctly defines what is "Valid" and what is "Well-formed". +// +// Deprecated: Valid is kept for compatibility and should not be used. +// Use Wellformed instead because it has a more appropriate name. +func (dm *decMode) Valid(data []byte) error { + return dm.Wellformed(data) +} + +// Wellformed checks whether data is a well-formed encoded CBOR data item and +// that it complies with configurable restrictions such as MaxNestedLevels, +// MaxArrayElements, MaxMapPairs, etc. +// +// If there are any remaining bytes after the CBOR data item, +// an ExtraneousDataError is returned. +func (dm *decMode) Wellformed(data []byte) error { + d := decoder{data: data, dm: dm} + return d.wellformed(false, false) +} + +// NewDecoder returns a new decoder that reads from r using dm DecMode. +func (dm *decMode) NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r, d: decoder{dm: dm}} +} + +type decoder struct { + data []byte + off int // next read offset in data + dm *decMode + + // expectedLaterEncodingTags stores a stack of encountered "Expected Later Encoding" tags, + // if any. + // + // The "Expected Later Encoding" tags (21 to 23) are valid for any data item. When decoding + // byte strings, the effective encoding comes from the tag nearest to the byte string being + // decoded. For example, the effective encoding of the byte string 21(22(h'41')) would be + // controlled by tag 22,and in the data item 23(h'42', 22([21(h'43')])]) the effective + // encoding of the byte strings h'42' and h'43' would be controlled by tag 23 and 21, + // respectively. + expectedLaterEncodingTags []uint64 +} + +// value decodes CBOR data item into the value pointed to by v. +// If CBOR data item fails to be decoded into v, +// error is returned and offset is moved to the next CBOR data item. +// Precondition: d.data contains at least one well-formed CBOR data item. +func (d *decoder) value(v interface{}) error { + // v can't be nil, non-pointer, or nil pointer value. + if v == nil { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"} + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"} + } else if rv.IsNil() { + return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"} + } + rv = rv.Elem() + return d.parseToValue(rv, getTypeInfo(rv.Type())) +} + +// parseToValue decodes CBOR data to value. It assumes data is well-formed, +// and does not perform bounds checking. +func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + + // Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil. 
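A brief sketch of the UnmarshalFirst and Wellformed entry points defined above (illustrative only), using two back-to-back data items, the integer 1 followed by the text string "hi":

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    dm, _ := cbor.DecOptions{}.DecMode()

    stream := []byte{0x01, 0x62, 'h', 'i'} // two data items: 1, then "hi"

    var n int
    rest, err := dm.UnmarshalFirst(stream, &n)
    fmt.Println(n, rest, err) // expected: 1 [98 104 105] <nil>

    var s string
    rest, err = dm.UnmarshalFirst(rest, &s)
    fmt.Println(s, len(rest), err) // expected: hi 0 <nil>

    // Wellformed accepts exactly one well-formed item and nothing after it.
    fmt.Println(dm.Wellformed([]byte{0x62, 'h', 'i'})) // expected: <nil>
}

Plain Unmarshal on the same two-item stream would instead report the trailing bytes as extraneous data.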
+ if d.nextCBORNil() && v.Kind() == reflect.Ptr { + d.skip() + v.Set(reflect.Zero(v.Type())) + return nil + } + + if tInfo.spclType == specialTypeIface { + if !v.IsNil() { + // Use value type + v = v.Elem() + tInfo = getTypeInfo(v.Type()) + } else { //nolint:gocritic + // Create and use registered type if CBOR data is registered tag + if d.dm.tags != nil && d.nextCBORType() == cborTypeTag { + + off := d.off + var tagNums []uint64 + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + tagNums = append(tagNums, tagNum) + } + d.off = off + + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + if registeredType.Implements(tInfo.nonPtrType) || + reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) { + v.Set(reflect.New(registeredType)) + v = v.Elem() + tInfo = getTypeInfo(registeredType) + } + } + } + } + } + + // Create new value for the pointer v to point to. + // At this point, CBOR value is not nil/undefined if v is a pointer. + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + d.skip() + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + + // Strip self-described CBOR tag number. + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return err + } + } + d.off = off + + if tInfo.spclType != specialTypeNone { + switch tInfo.spclType { + case specialTypeEmptyIface: + iv, err := d.parse(false) // Skipped self-described CBOR tag number already. + if iv != nil { + v.Set(reflect.ValueOf(iv)) + } + return err + + case specialTypeTag: + return d.parseToTag(v) + + case specialTypeTime: + if d.nextCBORNil() { + // Decoding CBOR null and undefined to time.Time is no-op. + d.skip() + return nil + } + tm, ok, err := d.parseToTime() + if err != nil { + return err + } + if ok { + v.Set(reflect.ValueOf(tm)) + } + return nil + + case specialTypeUnmarshalerIface: + return d.parseToUnmarshaler(v) + } + } + + // Check registered tag number + if tagItem := d.getRegisteredTagItem(tInfo.nonPtrType); tagItem != nil { + t := d.nextCBORType() + if t != cborTypeTag { + if tagItem.opts.DecTag == DecTagRequired { + d.skip() // Required tag number is absent, skip entire tag + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.typ.String(), + errorMsg: "expect CBOR tag value"} + } + } else if err := d.validRegisteredTagNums(tagItem); err != nil { + d.skip() // Skip tag content + return err + } + } + + t := d.nextCBORType() + + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + return fillPositiveInt(t, val, v) + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + nValue := int64(-1) ^ int64(val) + return fillNegativeInt(t, nValue, v) + + case cborTypeByteString: + b, copied := d.parseByteString() + b, converted, err := d.applyByteStringTextConversion(b, v.Type()) + if err != nil { + return err + } + copied = copied || converted + return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler) + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return err + } + return fillTextString(t, b, v) + + case cborTypePrimitives: + _, ai, val := d.getHead() + switch ai { + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return fillFloat(t, f, v) + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return fillFloat(t, f, v) + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return fillFloat(t, f, v) + + default: // ai <= 24 + if d.dm.simpleValues.rejected[SimpleValue(val)] { + return &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return fillBool(t, ai == additionalInformationAsTrue, v) + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return fillNil(t, v) + + default: + return fillPositiveInt(t, val, v) + } + } + + case cborTypeTag: + _, _, tagNum := d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + // Bignum (tag 2) can be decoded to uint, int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsUint64() { + return fillPositiveInt(t, bi.Uint64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumNegativeBignum: + // Bignum (tag 3) can be decoded to int, float, slice, array, or big.Int. + b, copied := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if tInfo.nonPtrType == typeBigInt { + v.Set(reflect.ValueOf(*bi)) + return nil + } + if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array { + return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler) + } + if bi.IsInt64() { + return fillNegativeInt(t, bi.Int64(), v) + } + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: bi.String() + " overflows " + v.Type().String(), + } + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. 
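The bignum handling above can be exercised as follows; this editorial sketch feeds tag 2 carrying 2^64, which fits big.Int but overflows every fixed-width Go integer destination:

package main

import (
    "fmt"
    "math/big"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    dm, _ := cbor.DecOptions{}.DecMode()

    // Tag 2 (0xc2) followed by a 9-byte string holding the unsigned bignum 2^64.
    data := []byte{0xc2, 0x49, 0x01, 0, 0, 0, 0, 0, 0, 0, 0}

    var bi big.Int
    err := dm.Unmarshal(data, &bi)
    fmt.Println(bi.String(), err) // expected: 18446744073709551616 <nil>

    var u uint64
    fmt.Println(dm.Unmarshal(data, &u)) // expected: an UnmarshalTypeError (value overflows uint64)
}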
+ if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + } + } + + return d.parseToValue(v, tInfo) + + case cborTypeArray: + if tInfo.nonPtrKind == reflect.Slice { + return d.parseArrayToSlice(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Array { + return d.parseArrayToArray(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Struct { + return d.parseArrayToStruct(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + + case cborTypeMap: + if tInfo.nonPtrKind == reflect.Struct { + return d.parseMapToStruct(v, tInfo) + } else if tInfo.nonPtrKind == reflect.Map { + return d.parseMapToMap(v, tInfo) + } + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: tInfo.nonPtrType.String()} + } + + return nil +} + +func (d *decoder) parseToTag(v reflect.Value) error { + if d.nextCBORNil() { + // Decoding CBOR null and undefined to cbor.Tag is no-op. + d.skip() + return nil + } + + t := d.nextCBORType() + if t != cborTypeTag { + d.skip() + return &UnmarshalTypeError{CBORType: t.String(), GoType: typeTag.String()} + } + + // Unmarshal tag number + _, _, num := d.getHead() + + // Unmarshal tag content + content, err := d.parse(false) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(Tag{num, content})) + return nil +} + +// parseToTime decodes the current data item as a time.Time. The bool return value is false if and +// only if the destination value should remain unmodified. +func (d *decoder) parseToTime() (time.Time, bool, error) { + // Verify that tag number or absence of tag number is acceptable to specified timeTag. 
+ if t := d.nextCBORType(); t == cborTypeTag { + if d.dm.timeTag == DecTagIgnored { + // Skip all enclosing tags + for t == cborTypeTag { + d.getHead() + t = d.nextCBORType() + } + if d.nextCBORNil() { + d.skip() + return time.Time{}, false, nil + } + } else { + // Read tag number + _, _, tagNum := d.getHead() + if tagNum != 0 && tagNum != 1 { + d.skip() // skip tag content + return time.Time{}, false, errors.New("cbor: wrong tag number for time.Time, got " + strconv.Itoa(int(tagNum)) + ", expect 0 or 1") + } + } + } else { + if d.dm.timeTag == DecTagRequired { + d.skip() + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String(), errorMsg: "expect CBOR tag value"} + } + } + + switch t := d.nextCBORType(); t { + case cborTypeByteString: + if d.dm.byteStringToTime == ByteStringToTimeAllowed { + b, _ := d.parseByteString() + t, err := time.Parse(time.RFC3339, string(b)) + if err != nil { + return time.Time{}, false, fmt.Errorf("cbor: cannot set %q for time.Time: %w", string(b), err) + } + return t, true, nil + } + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + + case cborTypeTextString: + s, err := d.parseTextString() + if err != nil { + return time.Time{}, false, err + } + t, err := time.Parse(time.RFC3339, string(s)) + if err != nil { + return time.Time{}, false, errors.New("cbor: cannot set " + string(s) + " for time.Time: " + err.Error()) + } + return t, true, nil + + case cborTypePositiveInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("%d overflows Go's int64", val), + } + } + return time.Unix(int64(val), 0), true, nil + + case cborTypeNegativeInt: + _, _, val := d.getHead() + if val > math.MaxInt64 { + if val == math.MaxUint64 { + // Maximum absolute value representable by negative integer is 2^64, + // not 2^64-1, so it overflows uint64. + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: "-18446744073709551616 overflows Go's int64", + } + } + return time.Time{}, false, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: typeTime.String(), + errorMsg: fmt.Sprintf("-%d overflows Go's int64", val+1), + } + } + return time.Unix(int64(-1)^int64(val), 0), true, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + var f float64 + switch ai { + case additionalInformationAsFloat16: + f = float64(float16.Frombits(uint16(val)).Float32()) + + case additionalInformationAsFloat32: + f = float64(math.Float32frombits(uint32(val))) + + case additionalInformationAsFloat64: + f = math.Float64frombits(val) + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } + + if math.IsNaN(f) || math.IsInf(f, 0) { + // https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.2-6 + return time.Time{}, true, nil + } + seconds, fractional := math.Modf(f) + return time.Unix(int64(seconds), int64(fractional*1e9)), true, nil + + default: + return time.Time{}, false, &UnmarshalTypeError{CBORType: t.String(), GoType: typeTime.String()} + } +} + +// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface. +// It assumes data is well-formed, and does not perform bounds checking. 
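A small example of the time decoding path above (an illustration, not vendored code); 0xc1 0x00 is tag 1 wrapping the integer 0, i.e. the Unix epoch:

package main

import (
    "fmt"
    "time"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    dm, _ := cbor.DecOptions{}.DecMode()

    var t time.Time
    err := dm.Unmarshal([]byte{0xc1, 0x00}, &t) // tag 1 (epoch time) wrapping the integer 0
    fmt.Println(t.UTC().Format(time.RFC3339), err) // expected: 1970-01-01T00:00:00Z <nil>

    // With the default TimeTag (DecTagIgnored), an untagged integer is accepted as well.
    err = dm.Unmarshal([]byte{0x00}, &t)
    fmt.Println(t.Unix(), err) // expected: 0 <nil>
}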
+func (d *decoder) parseToUnmarshaler(v reflect.Value) error { + if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() { + d.skip() + return nil + } + + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + } + if u, ok := v.Interface().(Unmarshaler); ok { + start := d.off + d.skip() + return u.UnmarshalCBOR(d.data[start:d.off]) + } + d.skip() + return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler") +} + +// parse parses CBOR data and returns value in default Go type. +// It assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo + // Strip self-described CBOR tag number. + if skipSelfDescribedTag { + for d.nextCBORType() == cborTypeTag { + off := d.off + _, _, tagNum := d.getHead() + if tagNum != tagNumSelfDescribedCBOR { + d.off = off + break + } + } + } + + // Check validity of supported built-in tags. + off := d.off + for d.nextCBORType() == cborTypeTag { + _, _, tagNum := d.getHead() + if err := validBuiltinTag(tagNum, d.data[d.off]); err != nil { + d.skip() + return nil, err + } + } + d.off = off + + t := d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := d.getHead() + + switch d.dm.intDec { + case IntDecConvertNone: + return val, nil + + case IntDecConvertSigned, IntDecConvertSignedOrFail: + if val > math.MaxInt64 { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + + return int64(val), nil + + case IntDecConvertSignedOrBigInt: + if val > math.MaxInt64 { + bi := new(big.Int).SetUint64(val) + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + return int64(val), nil + + default: + // not reachable + } + + case cborTypeNegativeInt: + _, _, val := d.getHead() + + if val > math.MaxInt64 { + // CBOR negative integer value overflows Go int64, use big.Int instead. + bi := new(big.Int).SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.intDec == IntDecConvertSignedOrFail { + return nil, &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: bi.String() + " overflows Go's int64", + } + } + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + } + + nValue := int64(-1) ^ int64(val) + return nValue, nil + + case cborTypeByteString: + b, copied := d.parseByteString() + var effectiveByteStringType = d.dm.defaultByteStringType + if effectiveByteStringType == nil { + effectiveByteStringType = typeByteSlice + } + b, converted, err := d.applyByteStringTextConversion(b, effectiveByteStringType) + if err != nil { + return nil, err + } + copied = copied || converted + + switch effectiveByteStringType { + case typeByteSlice: + if copied { + return b, nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return clone, nil + + case typeString: + return string(b), nil + + default: + if copied || d.dm.defaultByteStringType.Kind() == reflect.String { + // Avoid an unnecessary copy since the conversion to string must + // copy the underlying bytes. 
+ return reflect.ValueOf(b).Convert(d.dm.defaultByteStringType).Interface(), nil + } + clone := make([]byte, len(b)) + copy(clone, b) + return reflect.ValueOf(clone).Convert(d.dm.defaultByteStringType).Interface(), nil + } + + case cborTypeTextString: + b, err := d.parseTextString() + if err != nil { + return nil, err + } + return string(b), nil + + case cborTypeTag: + tagOff := d.off + _, _, tagNum := d.getHead() + contentOff := d.off + + switch tagNum { + case tagNumRFC3339Time, tagNumEpochTime: + d.off = tagOff + tm, _, err := d.parseToTime() + if err != nil { + return nil, err + } + + switch d.dm.timeTagToAny { + case TimeTagToTime: + return tm, nil + + case TimeTagToRFC3339: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format. E.g. year cannot exceed 9999, etc. + text, err := tm.Truncate(time.Second).MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format: %v", err) + } + return string(text), nil + + case TimeTagToRFC3339Nano: + if tagNum == 1 { + tm = tm.UTC() + } + // Call time.MarshalText() to format decoded time to RFC3339 format, + // and return error on time value that cannot be represented in + // RFC3339 format with sub-second precision. + text, err := tm.MarshalText() + if err != nil { + return nil, fmt.Errorf("cbor: decoded time cannot be represented in RFC3339 format with sub-second precision: %v", err) + } + return string(text), nil + + default: + // not reachable + } + + case tagNumUnsignedBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumNegativeBignum: + b, _ := d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + + if d.dm.bigIntDec == BigIntDecodePointer { + return bi, nil + } + return *bi, nil + + case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16: + // If conversion for interoperability with text encodings is not configured, + // treat tags 21-23 as unregistered tags. + if d.dm.byteStringToString == ByteStringToStringAllowedWithExpectedLaterEncoding || + d.dm.byteStringExpectedFormat != ByteStringExpectedFormatNone { + d.expectedLaterEncodingTags = append(d.expectedLaterEncodingTags, tagNum) + defer func() { + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:len(d.expectedLaterEncodingTags)-1] + }() + return d.parse(false) + } + } + + if d.dm.tags != nil { + // Parse to specified type if tag number is registered. 
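To show the TimeTagToAny branch above, a hedged sketch comparing the default mode with TimeTagToRFC3339 for the same tag 1 input; the RFC 3339 text shown is what the MarshalText call above should produce for the epoch:

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

func main() {
    asTime, _ := cbor.DecOptions{}.DecMode() // TimeTagToTime is the zero value
    asText, _ := cbor.DecOptions{TimeTagToAny: cbor.TimeTagToRFC3339}.DecMode()

    data := []byte{0xc1, 0x00} // tag 1 (epoch time) wrapping 0

    var a, b interface{}
    _ = asTime.Unmarshal(data, &a)
    _ = asText.Unmarshal(data, &b)
    fmt.Printf("%T\n", a) // expected: time.Time
    fmt.Println(b)        // expected: 1970-01-01T00:00:00Z
}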
+ tagNums := []uint64{tagNum} + for d.nextCBORType() == cborTypeTag { + _, _, num := d.getHead() + tagNums = append(tagNums, num) + } + registeredType := d.dm.tags.getTypeFromTagNum(tagNums) + if registeredType != nil { + d.off = tagOff + rv := reflect.New(registeredType) + if err := d.parseToValue(rv.Elem(), getTypeInfo(registeredType)); err != nil { + return nil, err + } + return rv.Elem().Interface(), nil + } + } + + // Parse tag content + d.off = contentOff + content, err := d.parse(false) + if err != nil { + return nil, err + } + if d.dm.unrecognizedTagToAny == UnrecognizedTagContentToAny { + return content, nil + } + return Tag{tagNum, content}, nil + + case cborTypePrimitives: + _, ai, val := d.getHead() + if ai <= 24 && d.dm.simpleValues.rejected[SimpleValue(val)] { + return nil, &UnacceptableDataItemError{ + CBORType: t.String(), + Message: "simple value " + strconv.FormatInt(int64(val), 10) + " is not recognized", + } + } + if ai < 20 || ai == 24 { + return SimpleValue(val), nil + } + + switch ai { + case additionalInformationAsFalse, + additionalInformationAsTrue: + return (ai == additionalInformationAsTrue), nil + + case additionalInformationAsNull, + additionalInformationAsUndefined: + return nil, nil + + case additionalInformationAsFloat16: + f := float64(float16.Frombits(uint16(val)).Float32()) + return f, nil + + case additionalInformationAsFloat32: + f := float64(math.Float32frombits(uint32(val))) + return f, nil + + case additionalInformationAsFloat64: + f := math.Float64frombits(val) + return f, nil + } + + case cborTypeArray: + return d.parseArray() + + case cborTypeMap: + if d.dm.defaultMapType != nil { + m := reflect.New(d.dm.defaultMapType) + err := d.parseToValue(m, getTypeInfo(m.Elem().Type())) + if err != nil { + return nil, err + } + return m.Elem().Interface(), nil + } + return d.parseMap() + } + + return nil, nil +} + +// parseByteString parses a CBOR encoded byte string. The returned byte slice +// may be backed directly by the input. The second return value will be true if +// and only if the slice is backed by a copy of the input. Callers are +// responsible for making a copy if necessary. +func (d *decoder) parseByteString() ([]byte, bool) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + return b, false + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + b = append(b, d.data[d.off:d.off+int(val)]...) + d.off += int(val) + } + return b, true +} + +// applyByteStringTextConversion converts bytes read from a byte string to or from a configured text +// encoding. If no transformation was performed (because it was not required), the original byte +// slice is returned and the bool return value is false. Otherwise, a new slice containing the +// converted bytes is returned along with the bool value true. 
+func (d *decoder) applyByteStringTextConversion( + src []byte, + dstType reflect.Type, +) ( + dst []byte, + transformed bool, + err error, +) { + switch dstType.Kind() { + case reflect.String: + if d.dm.byteStringToString != ByteStringToStringAllowedWithExpectedLaterEncoding || len(d.expectedLaterEncodingTags) == 0 { + return src, false, nil + } + + switch d.expectedLaterEncodingTags[len(d.expectedLaterEncodingTags)-1] { + case tagNumExpectedLaterEncodingBase64URL: + encoded := make([]byte, base64.RawURLEncoding.EncodedLen(len(src))) + base64.RawURLEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase64: + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(encoded, src) + return encoded, true, nil + + case tagNumExpectedLaterEncodingBase16: + encoded := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(encoded, src) + return encoded, true, nil + + default: + // If this happens, there is a bug: the decoder has pushed an invalid + // "expected later encoding" tag to the stack. + panic(fmt.Sprintf("unrecognized expected later encoding tag: %d", d.expectedLaterEncodingTags)) + } + + case reflect.Slice: + if dstType.Elem().Kind() != reflect.Uint8 || len(d.expectedLaterEncodingTags) > 0 { + // Either the destination is not a slice of bytes, or the encoder that + // produced the input indicated an expected text encoding tag and therefore + // the content of the byte string has NOT been text encoded. + return src, false, nil + } + + switch d.dm.byteStringExpectedFormat { + case ByteStringExpectedBase64URL: + decoded := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + n, err := base64.RawURLEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64URL, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase64: + decoded := make([]byte, base64.StdEncoding.DecodedLen(len(src))) + n, err := base64.StdEncoding.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase64, err) + } + return decoded[:n], true, nil + + case ByteStringExpectedBase16: + decoded := make([]byte, hex.DecodedLen(len(src))) + n, err := hex.Decode(decoded, src) + if err != nil { + return nil, false, newByteStringExpectedFormatError(ByteStringExpectedBase16, err) + } + return decoded[:n], true, nil + } + } + + return src, false, nil +} + +// parseTextString parses CBOR encoded text string. It returns a byte slice +// to prevent creating an extra copy of string. Caller should wrap returned +// byte slice as string when needed. +func (d *decoder) parseTextString() ([]byte, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + if !indefiniteLength { + b := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(b) { + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + return b, nil + } + // Process indefinite length string chunks. + b := []byte{} + for !d.foundBreak() { + _, _, val = d.getHead() + x := d.data[d.off : d.off+int(val)] + d.off += int(val) + if d.dm.utf8 == UTF8RejectInvalid && !utf8.Valid(x) { + for !d.foundBreak() { + d.skip() // Skip remaining chunk on error + } + return nil, &SemanticError{"cbor: invalid UTF-8 string"} + } + b = append(b, x...) 
+ } + return b, nil +} + +func (d *decoder) parseArray() ([]interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + v := make([]interface{}, count) + var e interface{} + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + v[i] = e + } + return v, err +} + +func (d *decoder) parseArrayToSlice(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance + } + if v.IsNil() || v.Cap() < count || count == 0 { + v.Set(reflect.MakeSlice(tInfo.nonPtrType, count, count)) + } + v.SetLen(count) + var err error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + if lastErr := d.parseToValue(v.Index(i), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + } + return err +} + +func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + gi := 0 + vLen := v.Len() + var err error + for ci := 0; (hasSize && ci < count) || (!hasSize && !d.foundBreak()); ci++ { + if gi < vLen { + // Read CBOR array element and set array element + if lastErr := d.parseToValue(v.Index(gi), tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + } + gi++ + } else { + d.skip() // Skip remaining CBOR array element + } + } + // Set remaining Go array elements to zero values. + if gi < vLen { + zeroV := reflect.Zero(tInfo.elemTypeInfo.typ) + for ; gi < vLen; gi++ { + v.Index(gi).Set(zeroV) + } + } + return err +} + +func (d *decoder) parseMap() (interface{}, error) { + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + m := make(map[interface{}]interface{}) + var k, e interface{} + var err, lastErr error + keyCount := 0 + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if k, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + rv := reflect.ValueOf(k) + if !isHashableValue(rv) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + k, converted = convertByteSliceToByteString(k) + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{rv.Type().String()} + } + d.skip() + continue + } + } + + // Parse CBOR map value. + if e, lastErr = d.parse(true); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + m[k] = e + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := len(m) + if newKeyCount == keyCount { + m[k] = nil + err = &DupMapKeyError{k, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // Skip map key + d.skip() // Skip map value + } + return m, err + } + keyCount = newKeyCount + } + } + return m, err +} + +func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if v.IsNil() { + mapsize := count + if !hasSize { + mapsize = 0 + } + v.Set(reflect.MakeMapWithSize(tInfo.nonPtrType, mapsize)) + } + keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ + reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind) + var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value + keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable. + var err, lastErr error + keyCount := v.Len() + var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + existingKeys = make(map[interface{}]bool, keyCount) + if keyCount > 0 { + vKeys := v.MapKeys() + for i := 0; i < len(vKeys); i++ { + existingKeys[vKeys[i].Interface()] = true + } + } + } + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + // Parse CBOR map key. + if !keyValue.IsValid() { + keyValue = reflect.New(keyType).Elem() + } else if !reuseKey { + if !zeroKeyValue.IsValid() { + zeroKeyValue = reflect.Zero(keyType) + } + keyValue.Set(zeroKeyValue) + } + if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() + continue + } + + // Detect if CBOR map key can be used as Go map key. + if keyIsInterfaceType && keyValue.Elem().IsValid() { + if !isHashableValue(keyValue.Elem()) { + var converted bool + if d.dm.mapKeyByteString == MapKeyByteStringAllowed { + var k interface{} + k, converted = convertByteSliceToByteString(keyValue.Elem().Interface()) + if converted { + keyValue.Set(reflect.ValueOf(k)) + } + } + if !converted { + if err == nil { + err = &InvalidMapKeyTypeError{keyValue.Elem().Type().String()} + } + d.skip() + continue + } + } + } + + // Parse CBOR map value. + if !eleValue.IsValid() { + eleValue = reflect.New(eleType).Elem() + } else if !reuseEle { + if !zeroEleValue.IsValid() { + zeroEleValue = reflect.Zero(eleType) + } + eleValue.Set(zeroEleValue) + } + if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil { + if err == nil { + err = lastErr + } + continue + } + + // Add key-value pair to Go map. + v.SetMapIndex(keyValue, eleValue) + + // Detect duplicate map key. 
+ if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + newKeyCount := v.Len() + if newKeyCount == keyCount { + kvi := keyValue.Interface() + if !existingKeys[kvi] { + v.SetMapIndex(keyValue, reflect.New(eleType).Elem()) + err = &DupMapKeyError{kvi, i} + i++ + // skip the rest of the map + for ; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + d.skip() // skip map key + d.skip() // skip map value + } + return err + } + delete(existingKeys, kvi) + } + keyCount = newKeyCount + } + } + return err +} + +func (d *decoder) parseArrayToStruct(v reflect.Value, tInfo *typeInfo) error { + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if !structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR array to struct without toarray option", + } + } + + start := d.off + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + if !hasSize { + count = d.numOfItemsUntilBreak() // peek ahead to get array size + } + if count != len(structType.fields) { + d.off = start + d.skip() + return &UnmarshalTypeError{ + CBORType: cborTypeArray.String(), + GoType: tInfo.typ.String(), + errorMsg: "cannot decode CBOR array to struct with different number of elements", + } + } + var err, lastErr error + for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ { + f := structType.fields[i] + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.typ.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// parseMapToStruct needs to be fast so gocyclo can be ignored for now. +func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo + structType := getDecodingStructType(tInfo.nonPtrType) + if structType.err != nil { + return structType.err + } + + if structType.toArray { + t := d.nextCBORType() + d.skip() + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: tInfo.nonPtrType.String(), + errorMsg: "cannot decode CBOR map to struct with toarray option", + } + } + + var err, lastErr error + + // Get CBOR map size + _, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + hasSize := !indefiniteLength + count := int(val) + + // Keeps track of matched struct fields + var foundFldIdx []bool + { + const maxStackFields = 128 + if nfields := len(structType.fields); nfields <= maxStackFields { + // For structs with typical field counts, expect that this can be + // stack-allocated. 
+ var a [maxStackFields]bool + foundFldIdx = a[:nfields] + } else { + foundFldIdx = make([]bool, len(structType.fields)) + } + } + + // Keeps track of CBOR map keys to detect duplicate map key + keyCount := 0 + var mapKeys map[interface{}]struct{} + + errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0 + +MapEntryLoop: + for j := 0; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + var f *field + + // If duplicate field detection is enabled and the key at index j did not match any + // field, k will hold the map key. + var k interface{} + + t := d.nextCBORType() + if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) { + var keyBytes []byte + if t == cborTypeTextString { + keyBytes, lastErr = d.parseTextString() + if lastErr != nil { + if err == nil { + err = lastErr + } + d.skip() // skip value + continue + } + } else { // cborTypeByteString + keyBytes, _ = d.parseByteString() + } + + // Check for exact match on field name. + if i, ok := structType.fieldIndicesByName[string(keyBytes)]; ok { + fld := structType.fields[i] + + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{fld.name, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + } + + // Find field with case-insensitive match + if f == nil && d.dm.fieldNameMatching == FieldNameMatchingPreferCaseSensitive { + keyLen := len(keyBytes) + keyString := string(keyBytes) + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if len(fld.name) == keyLen && strings.EqualFold(fld.name, keyString) { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{keyString, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = string(keyBytes) + } + } else if t <= cborTypeNegativeInt { // uint/int + var nameAsInt int64 + + if t == cborTypePositiveInt { + _, _, val := d.getHead() + nameAsInt = int64(val) + } else { + _, _, val := d.getHead() + if val > math.MaxInt64 { + if err == nil { + err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf(int64(0)).String(), + errorMsg: "-1-" + strconv.FormatUint(val, 10) + " overflows Go's int64", + } + } + d.skip() // skip value + continue + } + nameAsInt = int64(-1) ^ int64(val) + } + + // Find field + for i := 0; i < len(structType.fields); i++ { + fld := structType.fields[i] + if fld.keyAsInt && fld.nameAsInt == nameAsInt { + if !foundFldIdx[i] { + f = fld + foundFldIdx[i] = true + } else if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + err = &DupMapKeyError{nameAsInt, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } else { + // discard repeated match + d.skip() + continue MapEntryLoop + } + break + } + } + + if d.dm.dupMapKey == DupMapKeyEnforcedAPF && f == nil { + k = nameAsInt + } + } else { + if err == nil { + 
err = &UnmarshalTypeError{ + CBORType: t.String(), + GoType: reflect.TypeOf("").String(), + errorMsg: "map key is of type " + t.String() + " and cannot be used to match struct field name", + } + } + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + // parse key + k, lastErr = d.parse(true) + if lastErr != nil { + d.skip() // skip value + continue + } + // Detect if CBOR map key can be used as Go map key. + if !isHashableValue(reflect.ValueOf(k)) { + d.skip() // skip value + continue + } + } else { + d.skip() // skip key + } + } + + if f == nil { + if errOnUnknownField { + err = &UnknownFieldError{j} + d.skip() // Skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + + // Two map keys that match the same struct field are immediately considered + // duplicates. This check detects duplicates between two map keys that do + // not match a struct field. If unknown field errors are enabled, then this + // check is never reached. + if d.dm.dupMapKey == DupMapKeyEnforcedAPF { + if mapKeys == nil { + mapKeys = make(map[interface{}]struct{}, 1) + } + mapKeys[k] = struct{}{} + newKeyCount := len(mapKeys) + if newKeyCount == keyCount { + err = &DupMapKeyError{k, j} + d.skip() // skip value + j++ + // skip the rest of the map + for ; (hasSize && j < count) || (!hasSize && !d.foundBreak()); j++ { + d.skip() + d.skip() + } + return err + } + keyCount = newKeyCount + } + + d.skip() // Skip value + continue + } + + // Get field value by index + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + fv, lastErr = getFieldValue(v, f.idx, func(v reflect.Value) (reflect.Value, error) { + // Return a new value for embedded field null pointer to point to, or return error. + if !v.CanSet() { + return reflect.Value{}, errors.New("cbor: cannot set embedded pointer to unexported struct: " + v.Type().String()) + } + v.Set(reflect.New(v.Type().Elem())) + return v, nil + }) + if lastErr != nil && err == nil { + err = lastErr + } + if !fv.IsValid() { + d.skip() + continue + } + } + + if lastErr = d.parseToValue(fv, f.typInfo); lastErr != nil { + if err == nil { + if typeError, ok := lastErr.(*UnmarshalTypeError); ok { + typeError.StructFieldName = tInfo.nonPtrType.String() + "." + f.name + err = typeError + } else { + err = lastErr + } + } + } + } + return err +} + +// validRegisteredTagNums verifies that tag numbers match registered tag numbers of type t. +// validRegisteredTagNums assumes next CBOR data type is tag. It scans all tag numbers, and stops at tag content. +func (d *decoder) validRegisteredTagNums(registeredTag *tagItem) error { + // Scan until next cbor data is tag content. + tagNums := make([]uint64, 0, 1) + for d.nextCBORType() == cborTypeTag { + _, _, val := d.getHead() + tagNums = append(tagNums, val) + } + + if !registeredTag.equalTagNum(tagNums) { + return &WrongTagError{registeredTag.contentType, registeredTag.num, tagNums} + } + return nil +} + +func (d *decoder) getRegisteredTagItem(vt reflect.Type) *tagItem { + if d.dm.tags != nil { + return d.dm.tags.getTagItemFromType(vt) + } + return nil +} + +// skip moves data offset to the next item. skip assumes data is well-formed, +// and does not perform bounds checking. 
+func (d *decoder) skip() { + t, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag() + + if indefiniteLength { + switch t { + case cborTypeByteString, cborTypeTextString, cborTypeArray, cborTypeMap: + for { + if isBreakFlag(d.data[d.off]) { + d.off++ + return + } + d.skip() + } + } + } + + switch t { + case cborTypeByteString, cborTypeTextString: + d.off += int(val) + + case cborTypeArray: + for i := 0; i < int(val); i++ { + d.skip() + } + + case cborTypeMap: + for i := 0; i < int(val)*2; i++ { + d.skip() + } + + case cborTypeTag: + d.skip() + } +} + +func (d *decoder) getHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, +) { + t, ai, val = d.getHead() + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +// getHead assumes data is well-formed, and does not perform bounds checking. +func (d *decoder) getHead() (t cborType, ai byte, val uint64) { + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + + if ai <= maxAdditionalInformationWithoutArgument { + return + } + + if ai == additionalInformationWith1ByteArgument { + val = uint64(d.data[d.off]) + d.off++ + return + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + return + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + return + } + return +} + +func (d *decoder) numOfItemsUntilBreak() int { + savedOff := d.off + i := 0 + for !d.foundBreak() { + d.skip() + i++ + } + d.off = savedOff + return i +} + +// foundBreak returns true if next byte is CBOR break code and moves cursor by 1, +// otherwise it returns false. +// foundBreak assumes data is well-formed, and does not perform bounds checking. 
+func (d *decoder) foundBreak() bool { + if isBreakFlag(d.data[d.off]) { + d.off++ + return true + } + return false +} + +func (d *decoder) reset(data []byte) { + d.data = data + d.off = 0 + d.expectedLaterEncodingTags = d.expectedLaterEncodingTags[:0] +} + +func (d *decoder) nextCBORType() cborType { + return getType(d.data[d.off]) +} + +func (d *decoder) nextCBORNil() bool { + return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7 +} + +var ( + typeIntf = reflect.TypeOf([]interface{}(nil)).Elem() + typeTime = reflect.TypeOf(time.Time{}) + typeBigInt = reflect.TypeOf(big.Int{}) + typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + typeString = reflect.TypeOf("") + typeByteSlice = reflect.TypeOf([]byte(nil)) +) + +func fillNil(_ cborType, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr: + v.Set(reflect.Zero(v.Type())) + return nil + } + return nil +} + +func fillPositiveInt(t cborType, val uint64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if val > math.MaxInt64 { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + if v.OverflowInt(int64(val)) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(int64(val)) + return nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if v.OverflowUint(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatUint(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetUint(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + + if v.Type() == typeBigInt { + i := new(big.Int).SetUint64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillNegativeInt(t cborType, val int64, v reflect.Value) error { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if v.OverflowInt(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatInt(val, 10) + " overflows " + v.Type().String(), + } + } + v.SetInt(val) + return nil + + case reflect.Float32, reflect.Float64: + f := float64(val) + v.SetFloat(f) + return nil + } + if v.Type() == typeBigInt { + i := new(big.Int).SetInt64(val) + v.Set(reflect.ValueOf(*i)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillBool(t cborType, val bool, v reflect.Value) error { + if v.Kind() == reflect.Bool { + v.SetBool(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillFloat(t cborType, val float64, v reflect.Value) error { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + if v.OverflowFloat(val) { + return &UnmarshalTypeError{ + CBORType: t.String(), + GoType: v.Type().String(), + errorMsg: strconv.FormatFloat(val, 'E', -1, 64) + " overflows " + v.Type().String(), + } + } + v.SetFloat(val) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: 
v.Type().String()} +} + +func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error { + if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) { + if v.CanAddr() { + v = v.Addr() + if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok { + // The contract of BinaryUnmarshaler forbids + // retaining the input bytes, so no copying is + // required even if val is shared. + return u.UnmarshalBinary(val) + } + } + return errors.New("cbor: cannot set new value for " + v.Type().String()) + } + if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 { + src := val + if shared { + // SetBytes shares the underlying bytes of the source slice. + src = make([]byte, len(val)) + copy(src, val) + } + v.SetBytes(src) + return nil + } + if v.Kind() == reflect.Array && v.Type().Elem().Kind() == reflect.Uint8 { + vLen := v.Len() + i := 0 + for ; i < vLen && i < len(val); i++ { + v.Index(i).SetUint(uint64(val[i])) + } + // Set remaining Go array elements to zero values. + if i < vLen { + zeroV := reflect.Zero(reflect.TypeOf(byte(0))) + for ; i < vLen; i++ { + v.Index(i).Set(zeroV) + } + } + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func fillTextString(t cborType, val []byte, v reflect.Value) error { + if v.Kind() == reflect.String { + v.SetString(string(val)) + return nil + } + return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()} +} + +func isImmutableKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.String: + return true + + default: + return false + } +} + +func isHashableValue(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Slice, reflect.Map, reflect.Func: + return false + + case reflect.Struct: + switch rv.Type() { + case typeTag: + tag := rv.Interface().(Tag) + return isHashableValue(reflect.ValueOf(tag.Content)) + case typeBigInt: + return false + } + } + return true +} + +// convertByteSliceToByteString converts []byte to ByteString if +// - v is []byte type, or +// - v is Tag type and tag content type is []byte +// This function also handles nested tags. +// CBOR data is already verified to be well-formed before this function is used, +// so the recursion won't exceed max nested levels. +func convertByteSliceToByteString(v interface{}) (interface{}, bool) { + switch v := v.(type) { + case []byte: + return ByteString(v), true + + case Tag: + content, converted := convertByteSliceToByteString(v.Content) + if converted { + return Tag{Number: v.Number, Content: content}, true + } + } + return v, false +} diff --git a/vendor/github.com/fxamacker/cbor/v2/diagnose.go b/vendor/github.com/fxamacker/cbor/v2/diagnose.go new file mode 100644 index 00000000..44afb866 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/diagnose.go @@ -0,0 +1,724 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +package cbor + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "math/big" + "strconv" + "unicode/utf16" + "unicode/utf8" + + "github.com/x448/float16" +) + +// DiagMode is the main interface for CBOR diagnostic notation. +type DiagMode interface { + // Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode. + Diagnose([]byte) (string, error) + + // DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. + DiagnoseFirst([]byte) (string, []byte, error) + + // DiagOptions returns user specified options used to create this DiagMode. + DiagOptions() DiagOptions +} + +// ByteStringEncoding specifies the base encoding that byte strings are notated. +type ByteStringEncoding uint8 + +const ( + // ByteStringBase16Encoding encodes byte strings in base16, without padding. + ByteStringBase16Encoding ByteStringEncoding = iota + + // ByteStringBase32Encoding encodes byte strings in base32, without padding. + ByteStringBase32Encoding + + // ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding. + ByteStringBase32HexEncoding + + // ByteStringBase64Encoding encodes byte strings in base64url, without padding. + ByteStringBase64Encoding + + maxByteStringEncoding +) + +func (bse ByteStringEncoding) valid() error { + if bse >= maxByteStringEncoding { + return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse))) + } + return nil +} + +// DiagOptions specifies Diag options. +type DiagOptions struct { + // ByteStringEncoding specifies the base encoding that byte strings are notated. + // Default is ByteStringBase16Encoding. + ByteStringEncoding ByteStringEncoding + + // ByteStringHexWhitespace specifies notating with whitespace in byte string + // when ByteStringEncoding is ByteStringBase16Encoding. + ByteStringHexWhitespace bool + + // ByteStringText specifies notating with text in byte string + // if it is a valid UTF-8 text. + ByteStringText bool + + // ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string + // if it is a valid CBOR bytes. + ByteStringEmbeddedCBOR bool + + // CBORSequence specifies notating CBOR sequences. + // otherwise, it returns an error if there are more bytes after the first CBOR. + CBORSequence bool + + // FloatPrecisionIndicator specifies appending a suffix to indicate float precision. + // Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators. + FloatPrecisionIndicator bool + + // MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags. + // Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can + // require larger amounts of stack to deserialize. Don't increase this higher than you require. + MaxNestedLevels int + + // MaxArrayElements specifies the max number of elements for CBOR arrays. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxArrayElements int + + // MaxMapPairs specifies the max number of key-value pairs for CBOR maps. + // Default is 128*1024=131072 and it can be set to [16, 2147483647] + MaxMapPairs int +} + +// DiagMode returns a DiagMode with immutable options. 
+func (opts DiagOptions) DiagMode() (DiagMode, error) { + return opts.diagMode() +} + +func (opts DiagOptions) diagMode() (*diagMode, error) { + if err := opts.ByteStringEncoding.valid(); err != nil { + return nil, err + } + + decMode, err := DecOptions{ + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.decMode() + if err != nil { + return nil, err + } + + return &diagMode{ + byteStringEncoding: opts.ByteStringEncoding, + byteStringHexWhitespace: opts.ByteStringHexWhitespace, + byteStringText: opts.ByteStringText, + byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR, + cborSequence: opts.CBORSequence, + floatPrecisionIndicator: opts.FloatPrecisionIndicator, + decMode: decMode, + }, nil +} + +type diagMode struct { + byteStringEncoding ByteStringEncoding + byteStringHexWhitespace bool + byteStringText bool + byteStringEmbeddedCBOR bool + cborSequence bool + floatPrecisionIndicator bool + decMode *decMode +} + +// DiagOptions returns user specified options used to create this DiagMode. +func (dm *diagMode) DiagOptions() DiagOptions { + return DiagOptions{ + ByteStringEncoding: dm.byteStringEncoding, + ByteStringHexWhitespace: dm.byteStringHexWhitespace, + ByteStringText: dm.byteStringText, + ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR, + CBORSequence: dm.cborSequence, + FloatPrecisionIndicator: dm.floatPrecisionIndicator, + MaxNestedLevels: dm.decMode.maxNestedLevels, + MaxArrayElements: dm.decMode.maxArrayElements, + MaxMapPairs: dm.decMode.maxMapPairs, + } +} + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode. +func (dm *diagMode) Diagnose(data []byte) (string, error) { + return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence) +} + +// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. +func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return newDiagnose(data, dm.decMode, dm).diagFirst() +} + +var defaultDiagMode, _ = DiagOptions{}.diagMode() + +// Diagnose returns extended diagnostic notation (EDN) of CBOR data items +// using the default diagnostic mode. +// +// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation. +func Diagnose(data []byte) (string, error) { + return defaultDiagMode.Diagnose(data) +} + +// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest. 
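Reviewer note: a minimal usage sketch of the DiagMode API added above, assuming the package is imported as cbor from github.com/fxamacker/cbor/v2. Only DiagOptions fields defined in this file are used; `data` stands for any well-formed CBOR input and is not taken from this repository.

    dm, err := cbor.DiagOptions{
        ByteStringEncoding:      cbor.ByteStringBase16Encoding,
        ByteStringHexWhitespace: true,
        CBORSequence:            true, // allow CBOR sequences instead of erroring on extra bytes
    }.DiagMode()
    if err != nil {
        // invalid options
    }
    edn, err := dm.Diagnose(data) // extended diagnostic notation (EDN) string
    _ = edn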
+func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) { + return defaultDiagMode.DiagnoseFirst(data) +} + +type diagnose struct { + dm *diagMode + d *decoder + w *bytes.Buffer +} + +func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose { + return &diagnose{ + dm: diagm, + d: &decoder{data: data, dm: decm}, + w: &bytes.Buffer{}, + } +} + +func (di *diagnose) diag(cborSequence bool) (string, error) { + // CBOR Sequence + firstItem := true + for { + switch err := di.wellformed(cborSequence); err { + case nil: + if !firstItem { + di.w.WriteString(", ") + } + firstItem = false + if itemErr := di.item(); itemErr != nil { + return di.w.String(), itemErr + } + + case io.EOF: + if firstItem { + return di.w.String(), err + } + return di.w.String(), nil + + default: + return di.w.String(), err + } + } +} + +func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) { + err = di.wellformed(true) + if err == nil { + err = di.item() + } + + if err == nil { + // Return EDN and the rest of the data slice (which might be len 0) + return di.w.String(), di.d.data[di.d.off:], nil + } + + return di.w.String(), nil, err +} + +func (di *diagnose) wellformed(allowExtraData bool) error { + off := di.d.off + err := di.d.wellformed(allowExtraData, false) + di.d.off = off + return err +} + +func (di *diagnose) item() error { //nolint:gocyclo + initialByte := di.d.data[di.d.off] + switch initialByte { + case cborByteStringWithIndefiniteLengthHead, + cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string + di.d.off++ + if isBreakFlag(di.d.data[di.d.off]) { + di.d.off++ + switch initialByte { + case cborByteStringWithIndefiniteLengthHead: + // indefinite-length bytes with no chunks. + di.w.WriteString(`''_`) + return nil + case cborTextStringWithIndefiniteLengthHead: + // indefinite-length text with no chunks. + di.w.WriteString(`""_`) + return nil + } + } + + di.w.WriteString("(_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // wellformedIndefiniteString() already checked that the next item is a byte/text string. + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(')') + return nil + + case cborArrayWithIndefiniteLengthHead: // indefinite-length array + di.d.off++ + di.w.WriteString("[_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte(']') + return nil + + case cborMapWithIndefiniteLengthHead: // indefinite-length map + di.d.off++ + di.w.WriteString("{_ ") + + i := 0 + for !di.d.foundBreak() { + if i > 0 { + di.w.WriteString(", ") + } + + i++ + // key + if err := di.item(); err != nil { + return err + } + + di.w.WriteString(": ") + + // value + if err := di.item(); err != nil { + return err + } + } + + di.w.WriteByte('}') + return nil + } + + t := di.d.nextCBORType() + switch t { + case cborTypePositiveInt: + _, _, val := di.d.getHead() + di.w.WriteString(strconv.FormatUint(val, 10)) + return nil + + case cborTypeNegativeInt: + _, _, val := di.d.getHead() + if val > math.MaxInt64 { + // CBOR negative integer overflows int64, use big.Int to store value. 
+ bi := new(big.Int) + bi.SetUint64(val) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + } + + nValue := int64(-1) ^ int64(val) + di.w.WriteString(strconv.FormatInt(nValue, 10)) + return nil + + case cborTypeByteString: + b, _ := di.d.parseByteString() + return di.encodeByteString(b) + + case cborTypeTextString: + b, err := di.d.parseTextString() + if err != nil { + return err + } + return di.encodeTextString(string(b), '"') + + case cborTypeArray: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('[') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte(']') + return nil + + case cborTypeMap: + _, _, val := di.d.getHead() + count := int(val) + di.w.WriteByte('{') + + for i := 0; i < count; i++ { + if i > 0 { + di.w.WriteString(", ") + } + // key + if err := di.item(); err != nil { + return err + } + di.w.WriteString(": ") + // value + if err := di.item(); err != nil { + return err + } + } + di.w.WriteByte('}') + return nil + + case cborTypeTag: + _, _, tagNum := di.d.getHead() + switch tagNum { + case tagNumUnsignedBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumUnsignedBignum, + "byte string", + nt.String()) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + di.w.WriteString(bi.String()) + return nil + + case tagNumNegativeBignum: + if nt := di.d.nextCBORType(); nt != cborTypeByteString { + return newInadmissibleTagContentTypeError( + tagNumNegativeBignum, + "byte string", + nt.String(), + ) + } + + b, _ := di.d.parseByteString() + bi := new(big.Int).SetBytes(b) + bi.Add(bi, big.NewInt(1)) + bi.Neg(bi) + di.w.WriteString(bi.String()) + return nil + + default: + di.w.WriteString(strconv.FormatUint(tagNum, 10)) + di.w.WriteByte('(') + if err := di.item(); err != nil { + return err + } + di.w.WriteByte(')') + return nil + } + + case cborTypePrimitives: + _, ai, val := di.d.getHead() + switch ai { + case additionalInformationAsFalse: + di.w.WriteString("false") + return nil + + case additionalInformationAsTrue: + di.w.WriteString("true") + return nil + + case additionalInformationAsNull: + di.w.WriteString("null") + return nil + + case additionalInformationAsUndefined: + di.w.WriteString("undefined") + return nil + + case additionalInformationAsFloat16, + additionalInformationAsFloat32, + additionalInformationAsFloat64: + return di.encodeFloat(ai, val) + + default: + di.w.WriteString("simple(") + di.w.WriteString(strconv.FormatUint(val, 10)) + di.w.WriteByte(')') + return nil + } + } + + return nil +} + +// writeU16 format a rune as "\uxxxx" +func (di *diagnose) writeU16(val rune) { + di.w.WriteString("\\u") + var in [2]byte + in[0] = byte(val >> 8) + in[1] = byte(val) + sz := hex.EncodedLen(len(in)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, in[:]) + di.w.Write(dst) +} + +var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding) +var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) + +func (di *diagnose) encodeByteString(val []byte) error { + if len(val) > 0 { + if di.dm.byteStringText && utf8.Valid(val) { + return di.encodeTextString(string(val), '\'') + } + + if di.dm.byteStringEmbeddedCBOR { + di2 := newDiagnose(val, di.dm.decMode, di.dm) + // should always notating embedded CBOR sequence. 
+ if str, err := di2.diag(true); err == nil { + di.w.WriteString("<<") + di.w.WriteString(str) + di.w.WriteString(">>") + return nil + } + } + } + + switch di.dm.byteStringEncoding { + case ByteStringBase16Encoding: + di.w.WriteString("h'") + if di.dm.byteStringHexWhitespace { + sz := hex.EncodedLen(len(val)) + if len(val) > 0 { + sz += len(val) - 1 + } + di.w.Grow(sz) + + dst := di.w.Bytes()[di.w.Len():] + for i := range val { + if i > 0 { + dst = append(dst, ' ') + } + hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1]) + dst = dst[:len(dst)+2] + } + di.w.Write(dst) + } else { + sz := hex.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + hex.Encode(dst, val) + di.w.Write(dst) + } + di.w.WriteByte('\'') + return nil + + case ByteStringBase32Encoding: + di.w.WriteString("b32'") + sz := rawBase32Encoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32Encoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase32HexEncoding: + di.w.WriteString("h32'") + sz := rawBase32HexEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + rawBase32HexEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + case ByteStringBase64Encoding: + di.w.WriteString("b64'") + sz := base64.RawURLEncoding.EncodedLen(len(val)) + di.w.Grow(sz) + dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz] + base64.RawURLEncoding.Encode(dst, val) + di.w.Write(dst) + di.w.WriteByte('\'') + return nil + + default: + // It should not be possible for users to construct a *diagMode with an invalid byte + // string encoding. + panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding)) + } +} + +const utf16SurrSelf = rune(0x10000) + +// quote should be either `'` or `"` +func (di *diagnose) encodeTextString(val string, quote byte) error { + di.w.WriteByte(quote) + + for i := 0; i < len(val); { + if b := val[i]; b < utf8.RuneSelf { + switch { + case b == '\t', b == '\n', b == '\r', b == '\\', b == quote: + di.w.WriteByte('\\') + + switch b { + case '\t': + b = 't' + case '\n': + b = 'n' + case '\r': + b = 'r' + } + di.w.WriteByte(b) + + case b >= ' ' && b <= '~': + di.w.WriteByte(b) + + default: + di.writeU16(rune(b)) + } + + i++ + continue + } + + c, size := utf8.DecodeRuneInString(val[i:]) + switch { + case c == utf8.RuneError: + return &SemanticError{"cbor: invalid UTF-8 string"} + + case c < utf16SurrSelf: + di.writeU16(c) + + default: + c1, c2 := utf16.EncodeRune(c) + di.writeU16(c1) + di.writeU16(c2) + } + + i += size + } + + di.w.WriteByte(quote) + return nil +} + +func (di *diagnose) encodeFloat(ai byte, val uint64) error { + f64 := float64(0) + switch ai { + case additionalInformationAsFloat16: + f16 := float16.Frombits(uint16(val)) + switch { + case f16.IsNaN(): + di.w.WriteString("NaN") + return nil + case f16.IsInf(1): + di.w.WriteString("Infinity") + return nil + case f16.IsInf(-1): + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f16.Float32()) + } + + case additionalInformationAsFloat32: + f32 := math.Float32frombits(uint32(val)) + switch { + case f32 != f32: + di.w.WriteString("NaN") + return nil + case f32 > math.MaxFloat32: + di.w.WriteString("Infinity") + return nil + case f32 < -math.MaxFloat32: + di.w.WriteString("-Infinity") + return nil + default: + f64 = float64(f32) + } + + case additionalInformationAsFloat64: + f64 = math.Float64frombits(val) + switch { + case f64 
!= f64: + di.w.WriteString("NaN") + return nil + case f64 > math.MaxFloat64: + di.w.WriteString("Infinity") + return nil + case f64 < -math.MaxFloat64: + di.w.WriteString("-Infinity") + return nil + } + } + // Use ES6 number to string conversion which should match most JSON generators. + // Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585 + const bitSize = 64 + b := make([]byte, 0, 32) + if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) { + b = strconv.AppendFloat(b, f64, 'e', -1, bitSize) + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && string(b[n-4:n-1]) == "e-0" { + b = append(b[:n-2], b[n-1]) + } + } else { + b = strconv.AppendFloat(b, f64, 'f', -1, bitSize) + } + + // add decimal point and trailing zero if needed + if bytes.IndexByte(b, '.') < 0 { + if i := bytes.IndexByte(b, 'e'); i < 0 { + b = append(b, '.', '0') + } else { + b = append(b[:i+2], b[i:]...) + b[i] = '.' + b[i+1] = '0' + } + } + + di.w.WriteString(string(b)) + + if di.dm.floatPrecisionIndicator { + switch ai { + case additionalInformationAsFloat16: + di.w.WriteString("_1") + return nil + + case additionalInformationAsFloat32: + di.w.WriteString("_2") + return nil + + case additionalInformationAsFloat64: + di.w.WriteString("_3") + return nil + } + } + + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/doc.go b/vendor/github.com/fxamacker/cbor/v2/doc.go new file mode 100644 index 00000000..23f68b98 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/doc.go @@ -0,0 +1,129 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +/* +Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags, +Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding, +CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection. + +Encoding options allow "preferred serialization" by encoding integers and floats +to their smallest forms (e.g. float16) when values fit. + +Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller +and easier to use with structs. + +For example, "toarray" tag makes struct fields encode to CBOR array elements. And +"keyasint" makes a field encode to an element of CBOR map with specified int key. + +Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go + +# Basics + +The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start + +Function signatures identical to encoding/json include: + + Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode. + +Standard interfaces include: + + BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler. + +Custom encoding and decoding is possible by implementing standard interfaces for +user-defined Go types. + +Codec functions are available at package-level (using defaults options) or by +creating modes from options at runtime. + +"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode). + +EncMode and DecMode interfaces are created from EncOptions or DecOptions structs. + + em, err := cbor.EncOptions{...}.EncMode() + em, err := cbor.CanonicalEncOptions().EncMode() + em, err := cbor.CTAP2EncOptions().EncMode() + +Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of +modes won't accidentally change at runtime after they're created. 
+ +Modes are intended to be reused and are safe for concurrent use. + +EncMode and DecMode Interfaces + + // EncMode interface uses immutable options and is safe for concurrent use. + type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions // returns copy of options + } + + // DecMode interface uses immutable options and is safe for concurrent use. + type DecMode interface { + Unmarshal(data []byte, v interface{}) error + NewDecoder(r io.Reader) *Decoder + DecOptions() DecOptions // returns copy of options + } + +Using Default Encoding Mode + + b, err := cbor.Marshal(v) + + encoder := cbor.NewEncoder(w) + err = encoder.Encode(v) + +Using Default Decoding Mode + + err := cbor.Unmarshal(b, &v) + + decoder := cbor.NewDecoder(r) + err = decoder.Decode(&v) + +Creating and Using Encoding Modes + + // Create EncOptions using either struct literal or a function. + opts := cbor.CanonicalEncOptions() + + // If needed, modify encoding options + opts.Time = cbor.TimeUnix + + // Create reusable EncMode interface with immutable options, safe for concurrent use. + em, err := opts.EncMode() + + // Use EncMode like encoding/json, with same function signatures. + b, err := em.Marshal(v) + // or + encoder := em.NewEncoder(w) + err := encoder.Encode(v) + + // NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options + // specified during creation of em (encoding mode). + +# CBOR Options + +Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options + +Encoding Options: https://github.com/fxamacker/cbor#encoding-options + +Decoding Options: https://github.com/fxamacker/cbor#decoding-options + +# Struct Tags + +Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected. +If both struct tags are specified then `cbor` is used. + +Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use +very compact formats like COSE and CWT (CBOR Web Tokens) with structs. + +For example, "toarray" makes struct fields encode to array elements. And "keyasint" +makes struct fields encode to elements of CBOR map with int keys. + +https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png + +Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1 + +# Tests and Fuzzing + +Over 375 tests are included in this package. Cover-guided fuzzing is handled by +a private fuzzer that replaced fxamacker/cbor-fuzz years ago. +*/ +package cbor diff --git a/vendor/github.com/fxamacker/cbor/v2/encode.go b/vendor/github.com/fxamacker/cbor/v2/encode.go new file mode 100644 index 00000000..6508e291 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode.go @@ -0,0 +1,1989 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/big" + "math/rand" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/x448/float16" +) + +// Marshal returns the CBOR encoding of v using default encoding options. +// See EncOptions for encoding options. +// +// Marshal uses the following encoding rules: +// +// If value implements the Marshaler interface, Marshal calls its +// MarshalCBOR method. +// +// If value implements encoding.BinaryMarshaler, Marhsal calls its +// MarshalBinary method and encode it as CBOR byte string. 
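A short sketch of the struct tags described above (toarray, keyasint, omitempty). The type and field names are illustrative, not taken from this repository; only the tag syntax documented in this file is assumed.

    // "toarray" on the special "_" field encodes the struct as a CBOR array (type 4).
    type signedMessage struct {
        _         struct{} `cbor:",toarray"`
        Protected []byte
        Payload   []byte
        Signature []byte
    }

    // "keyasint" encodes fields as CBOR map entries with integer keys;
    // "omitempty" drops a field that would encode as an empty value.
    type claims struct {
        Issuer  string `cbor:"1,keyasint"`
        Subject string `cbor:"2,keyasint"`
        Expiry  int64  `cbor:"4,keyasint,omitempty"`
    }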
+// +// Boolean values encode as CBOR booleans (type 7). +// +// Positive integer values encode as CBOR positive integers (type 0). +// +// Negative integer values encode as CBOR negative integers (type 1). +// +// Floating point values encode as CBOR floating points (type 7). +// +// String values encode as CBOR text strings (type 3). +// +// []byte values encode as CBOR byte strings (type 2). +// +// Array and slice values encode as CBOR arrays (type 4). +// +// Map values encode as CBOR maps (type 5). +// +// Struct values encode as CBOR maps (type 5). Each exported struct field +// becomes a pair with field name encoded as CBOR text string (type 3) and +// field value encoded based on its type. See struct tag option "keyasint" +// to encode field name as CBOR integer (type 0 and 1). Also see struct +// tag option "toarray" for special field "_" to encode struct values as +// CBOR array (type 4). +// +// Marshal supports format string stored under the "cbor" key in the struct +// field's tag. CBOR format string can specify the name of the field, +// "omitempty" and "keyasint" options, and special case "-" for field omission. +// If "cbor" key is absent, Marshal uses "json" key. +// +// Struct field name is treated as integer if it has "keyasint" option in +// its format string. The format string must specify an integer as its +// field name. +// +// Special struct field "_" is used to specify struct level options, such as +// "toarray". "toarray" option enables Go struct to be encoded as CBOR array. +// "omitempty" is disabled by "toarray" to ensure that the same number +// of elements are encoded every time. +// +// Anonymous struct fields are marshaled as if their exported fields +// were fields in the outer struct. Marshal follows the same struct fields +// visibility rules used by JSON encoding package. +// +// time.Time values encode as text strings specified in RFC3339 or numerical +// representation of seconds since January 1, 1970 UTC depending on +// EncOptions.Time setting. Also See EncOptions.TimeTag to encode +// time.Time as CBOR tag with tag number 0 or 1. +// +// big.Int values encode as CBOR integers (type 0 and 1) if values fit. +// Otherwise, big.Int values encode as CBOR bignums (tag 2 and 3). See +// EncOptions.BigIntConvert to always encode big.Int values as CBOR +// bignums. +// +// Pointer values encode as the value pointed to. +// +// Interface values encode as the value stored in the interface. +// +// Nil slice/map/pointer/interface values encode as CBOR nulls (type 7). +// +// Values of other types cannot be encoded in CBOR. Attempting +// to encode such a value causes Marshal to return an UnsupportedTypeError. +func Marshal(v interface{}) ([]byte, error) { + return defaultEncMode.Marshal(v) +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses default encoding options. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + return defaultEncMode.MarshalToBuffer(v, buf) +} + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid CBOR. +type Marshaler interface { + MarshalCBOR() ([]byte, error) +} + +// MarshalerError represents error from checking encoded CBOR data item +// returned from MarshalCBOR for well-formedness and some very limited tag validation. 
+type MarshalerError struct { + typ reflect.Type + err error +} + +func (e *MarshalerError) Error() string { + return "cbor: error calling MarshalCBOR for type " + + e.typ.String() + + ": " + e.err.Error() +} + +func (e *MarshalerError) Unwrap() error { + return e.err +} + +// UnsupportedTypeError is returned by Marshal when attempting to encode value +// of an unsupported type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "cbor: unsupported type: " + e.Type.String() +} + +// UnsupportedValueError is returned by Marshal when attempting to encode an +// unsupported value. +type UnsupportedValueError struct { + msg string +} + +func (e *UnsupportedValueError) Error() string { + return "cbor: unsupported value: " + e.msg +} + +// SortMode identifies supported sorting order. +type SortMode int + +const ( + // SortNone encodes map pairs and struct fields in an arbitrary order. + SortNone SortMode = 0 + + // SortLengthFirst causes map keys or struct fields to be sorted such that: + // - If two keys have different lengths, the shorter one sorts earlier; + // - If two keys have the same length, the one with the lower value in + // (byte-wise) lexical order sorts earlier. + // It is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortLengthFirst SortMode = 1 + + // SortBytewiseLexical causes map keys or struct fields to be sorted in the + // bytewise lexicographic order of their deterministic CBOR encodings. + // It is used in "CTAP2 Canonical CBOR" and "Core Deterministic Encoding" + // in RFC 7049bis. + SortBytewiseLexical SortMode = 2 + + // SortShuffle encodes map pairs and struct fields in a shuffled + // order. This mode does not guarantee an unbiased permutation, but it + // does guarantee that the runtime of the shuffle algorithm used will be + // constant. + SortFastShuffle SortMode = 3 + + // SortCanonical is used in "Canonical CBOR" encoding in RFC 7049 3.9. + SortCanonical SortMode = SortLengthFirst + + // SortCTAP2 is used in "CTAP2 Canonical CBOR". + SortCTAP2 SortMode = SortBytewiseLexical + + // SortCoreDeterministic is used in "Core Deterministic Encoding" in RFC 7049bis. + SortCoreDeterministic SortMode = SortBytewiseLexical + + maxSortMode SortMode = 4 +) + +func (sm SortMode) valid() bool { + return sm >= 0 && sm < maxSortMode +} + +// StringMode specifies how to encode Go string values. +type StringMode int + +const ( + // StringToTextString encodes Go string to CBOR text string (major type 3). + StringToTextString StringMode = iota + + // StringToByteString encodes Go string to CBOR byte string (major type 2). + StringToByteString +) + +func (st StringMode) cborType() (cborType, error) { + switch st { + case StringToTextString: + return cborTypeTextString, nil + + case StringToByteString: + return cborTypeByteString, nil + } + return 0, errors.New("cbor: invalid StringType " + strconv.Itoa(int(st))) +} + +// ShortestFloatMode specifies which floating-point format should +// be used as the shortest possible format for CBOR encoding. +// It is not used for encoding Infinity and NaN values. +type ShortestFloatMode int + +const ( + // ShortestFloatNone makes float values encode without any conversion. + // This is the default for ShortestFloatMode in v1. + // E.g. a float32 in Go will encode to CBOR float32. And + // a float64 in Go will encode to CBOR float64. + ShortestFloatNone ShortestFloatMode = iota + + // ShortestFloat16 specifies float16 as the shortest form that preserves value. + // E.g. 
if float64 can convert to float32 while preserving value, then + // encoding will also try to convert float32 to float16. So a float64 might + // encode as CBOR float64, float32 or float16 depending on the value. + ShortestFloat16 + + maxShortestFloat +) + +func (sfm ShortestFloatMode) valid() bool { + return sfm >= 0 && sfm < maxShortestFloat +} + +// NaNConvertMode specifies how to encode NaN and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type NaNConvertMode int + +const ( + // NaNConvert7e00 always encodes NaN to 0xf97e00 (CBOR float16 = 0x7e00). + NaNConvert7e00 NaNConvertMode = iota + + // NaNConvertNone never modifies or converts NaN to other representations + // (float64 NaN stays float64, etc. even if it can use float16 without losing + // any bits). + NaNConvertNone + + // NaNConvertPreserveSignal converts NaN to the smallest form that preserves + // value (quiet bit + payload) as described in RFC 7049bis Draft 12. + NaNConvertPreserveSignal + + // NaNConvertQuiet always forces quiet bit = 1 and shortest form that preserves + // NaN payload. + NaNConvertQuiet + + // NaNConvertReject returns UnsupportedValueError on attempts to encode a NaN value. + NaNConvertReject + + maxNaNConvert +) + +func (ncm NaNConvertMode) valid() bool { + return ncm >= 0 && ncm < maxNaNConvert +} + +// InfConvertMode specifies how to encode Infinity and overrides ShortestFloatMode. +// ShortestFloatMode is not used for encoding Infinity and NaN values. +type InfConvertMode int + +const ( + // InfConvertFloat16 always converts Inf to lossless IEEE binary16 (float16). + InfConvertFloat16 InfConvertMode = iota + + // InfConvertNone never converts (used by CTAP2 Canonical CBOR). + InfConvertNone + + // InfConvertReject returns UnsupportedValueError on attempts to encode an infinite value. + InfConvertReject + + maxInfConvert +) + +func (icm InfConvertMode) valid() bool { + return icm >= 0 && icm < maxInfConvert +} + +// TimeMode specifies how to encode time.Time values. +type TimeMode int + +const ( + // TimeUnix causes time.Time to be encoded as epoch time in integer with second precision. + TimeUnix TimeMode = iota + + // TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision. + TimeUnixMicro + + // TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds, + // otherwise float-point rounded to microsecond precision. + TimeUnixDynamic + + // TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision. + TimeRFC3339 + + // TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision. + TimeRFC3339Nano + + maxTimeMode +) + +func (tm TimeMode) valid() bool { + return tm >= 0 && tm < maxTimeMode +} + +// BigIntConvertMode specifies how to encode big.Int values. +type BigIntConvertMode int + +const ( + // BigIntConvertShortest makes big.Int encode to CBOR integer if value fits. + // E.g. if big.Int value can be converted to CBOR integer while preserving + // value, encoder will encode it to CBOR integer (major type 0 or 1). + BigIntConvertShortest BigIntConvertMode = iota + + // BigIntConvertNone makes big.Int encode to CBOR bignum (tag 2 or 3) without + // converting it to another CBOR type. + BigIntConvertNone + + // BigIntConvertReject returns an UnsupportedTypeError instead of marshaling a big.Int. 
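+
+// Editor's note: illustrative caller-side sketch only; not part of the
+// vendored upstream source. With the default BigIntConvertShortest, a big.Int
+// that fits in 64 bits encodes as a plain CBOR integer, while larger values
+// fall back to a CBOR bignum (tag 2 or 3):
+//
+//	em, _ := cbor.EncOptions{BigIntConvert: cbor.BigIntConvertShortest}.EncMode()
+//	small, _ := em.Marshal(big.NewInt(42))                       // 0x182a (integer 42)
+//	large, _ := em.Marshal(new(big.Int).Lsh(big.NewInt(1), 100)) // tag 2 bignum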
+ BigIntConvertReject + + maxBigIntConvert +) + +func (bim BigIntConvertMode) valid() bool { + return bim >= 0 && bim < maxBigIntConvert +} + +// NilContainersMode specifies how to encode nil slices and maps. +type NilContainersMode int + +const ( + // NilContainerAsNull encodes nil slices and maps as CBOR null. + // This is the default. + NilContainerAsNull NilContainersMode = iota + + // NilContainerAsEmpty encodes nil slices and maps as + // empty container (CBOR bytestring, array, or map). + NilContainerAsEmpty + + maxNilContainersMode +) + +func (m NilContainersMode) valid() bool { + return m >= 0 && m < maxNilContainersMode +} + +// OmitEmptyMode specifies how to encode struct fields with omitempty tag. +// The default behavior omits if field value would encode as empty CBOR value. +type OmitEmptyMode int + +const ( + // OmitEmptyCBORValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field would be encoded as an empty + // CBOR value, such as CBOR false, 0, 0.0, nil, empty byte, empty string, + // empty array, or empty map. + OmitEmptyCBORValue OmitEmptyMode = iota + + // OmitEmptyGoValue specifies that struct fields tagged with "omitempty" + // should be omitted from encoding if the field has an empty Go value, + // defined as false, 0, 0.0, a nil pointer, a nil interface value, and + // any empty array, slice, map, or string. + // This behavior is the same as the current (aka v1) encoding/json package + // included in Go. + OmitEmptyGoValue + + maxOmitEmptyMode +) + +func (om OmitEmptyMode) valid() bool { + return om >= 0 && om < maxOmitEmptyMode +} + +// FieldNameMode specifies the CBOR type to use when encoding struct field names. +type FieldNameMode int + +const ( + // FieldNameToTextString encodes struct fields to CBOR text string (major type 3). + FieldNameToTextString FieldNameMode = iota + + // FieldNameToTextString encodes struct fields to CBOR byte string (major type 2). + FieldNameToByteString + + maxFieldNameMode +) + +func (fnm FieldNameMode) valid() bool { + return fnm >= 0 && fnm < maxFieldNameMode +} + +// ByteSliceLaterFormatMode specifies which later format conversion hint (CBOR tag 21-23) +// to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will +// always encode unmodified bytes from the byte slice and just wrap it within +// CBOR tag 21, 22, or 23 if specified. +// See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. +type ByteSliceLaterFormatMode int + +const ( + // ByteSliceLaterFormatNone encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // without adding CBOR tag 21, 22, or 23. + ByteSliceLaterFormatNone ByteSliceLaterFormatMode = iota + + // ByteSliceLaterFormatBase64URL encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 21 (expected later conversion to base64url encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64URL + + // ByteSliceLaterFormatBase64 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 22 (expected later conversion to base64 encoding, see RFC 8949 Section 3.4.5.2). + ByteSliceLaterFormatBase64 + + // ByteSliceLaterFormatBase16 encodes unmodified bytes from Go byte slice to CBOR byte string (major type 2) + // inside CBOR tag 23 (expected later conversion to base16 encoding, see RFC 8949 Section 3.4.5.2). 
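+
+// Editor's note: illustrative caller-side sketch only; not part of the
+// vendored upstream source. The two omitempty interpretations differ for a
+// nested struct: OmitEmptyCBORValue omits it when it would encode as an empty
+// CBOR map, whereas OmitEmptyGoValue (encoding/json semantics) never treats a
+// struct as empty:
+//
+//	type inner struct {
+//		N int `cbor:"n,omitempty"`
+//	}
+//	type outer struct {
+//		In inner `cbor:"in,omitempty"`
+//	}
+//
+//	emCBOR, _ := cbor.EncOptions{OmitEmpty: cbor.OmitEmptyCBORValue}.EncMode()
+//	emGo, _ := cbor.EncOptions{OmitEmpty: cbor.OmitEmptyGoValue}.EncMode()
+//	b1, _ := emCBOR.Marshal(outer{}) // 0xa0: "in" omitted (empty CBOR value)
+//	b2, _ := emGo.Marshal(outer{})   // {"in": {}}: structs are never empty Go values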
+ ByteSliceLaterFormatBase16 +) + +func (bsefm ByteSliceLaterFormatMode) encodingTag() (uint64, error) { + switch bsefm { + case ByteSliceLaterFormatNone: + return 0, nil + + case ByteSliceLaterFormatBase64URL: + return tagNumExpectedLaterEncodingBase64URL, nil + + case ByteSliceLaterFormatBase64: + return tagNumExpectedLaterEncodingBase64, nil + + case ByteSliceLaterFormatBase16: + return tagNumExpectedLaterEncodingBase16, nil + } + return 0, errors.New("cbor: invalid ByteSliceLaterFormat " + strconv.Itoa(int(bsefm))) +} + +// ByteArrayMode specifies how to encode byte arrays. +type ByteArrayMode int + +const ( + // ByteArrayToByteSlice encodes byte arrays the same way that a byte slice with identical + // length and contents is encoded. + ByteArrayToByteSlice ByteArrayMode = iota + + // ByteArrayToArray encodes byte arrays to the CBOR array type with one unsigned integer + // item for each byte in the array. + ByteArrayToArray + + maxByteArrayMode +) + +func (bam ByteArrayMode) valid() bool { + return bam >= 0 && bam < maxByteArrayMode +} + +// BinaryMarshalerMode specifies how to encode types that implement encoding.BinaryMarshaler. +type BinaryMarshalerMode int + +const ( + // BinaryMarshalerByteString encodes the output of MarshalBinary to a CBOR byte string. + BinaryMarshalerByteString BinaryMarshalerMode = iota + + // BinaryMarshalerNone does not recognize BinaryMarshaler implementations during encode. + BinaryMarshalerNone + + maxBinaryMarshalerMode +) + +func (bmm BinaryMarshalerMode) valid() bool { + return bmm >= 0 && bmm < maxBinaryMarshalerMode +} + +// EncOptions specifies encoding options. +type EncOptions struct { + // Sort specifies sorting order. + Sort SortMode + + // ShortestFloat specifies the shortest floating-point encoding that preserves + // the value being encoded. + ShortestFloat ShortestFloatMode + + // NaNConvert specifies how to encode NaN and it overrides ShortestFloatMode. + NaNConvert NaNConvertMode + + // InfConvert specifies how to encode Inf and it overrides ShortestFloatMode. + InfConvert InfConvertMode + + // BigIntConvert specifies how to encode big.Int values. + BigIntConvert BigIntConvertMode + + // Time specifies how to encode time.Time. + Time TimeMode + + // TimeTag allows time.Time to be encoded with a tag number. + // RFC3339 format gets tag number 0, and numeric epoch time tag number 1. + TimeTag EncTagMode + + // IndefLength specifies whether to allow indefinite length CBOR items. + IndefLength IndefLengthMode + + // NilContainers specifies how to encode nil slices and maps. + NilContainers NilContainersMode + + // TagsMd specifies whether to allow CBOR tags (major type 6). + TagsMd TagsMode + + // OmitEmptyMode specifies how to encode struct fields with omitempty tag. + OmitEmpty OmitEmptyMode + + // String specifies which CBOR type to use when encoding Go strings. + // - CBOR text string (major type 3) is default + // - CBOR byte string (major type 2) + String StringMode + + // FieldName specifies the CBOR type to use when encoding struct field names. + FieldName FieldNameMode + + // ByteSliceLaterFormat specifies which later format conversion hint (CBOR tag 21-23) + // to include (if any) when encoding Go byte slice to CBOR byte string. The encoder will + // always encode unmodified bytes from the byte slice and just wrap it within + // CBOR tag 21, 22, or 23 if specified. + // See "Expected Later Encoding for CBOR-to-JSON Converters" in RFC 8949 Section 3.4.5.2. 
+ ByteSliceLaterFormat ByteSliceLaterFormatMode + + // ByteArray specifies how to encode byte arrays. + ByteArray ByteArrayMode + + // BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler. + BinaryMarshaler BinaryMarshalerMode +} + +// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding, +// defined in RFC 7049 Section 3.9 with the following rules: +// +// 1. "Integers must be as small as possible." +// 2. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 3. The keys in every map must be sorted in length-first sorting order. +// See SortLengthFirst for details. +// 4. "Indefinite-length items must be made into definite-length items." +// 5. "If a protocol allows for IEEE floats, then additional canonicalization rules might +// need to be added. One example rule might be to have all floats start as a 64-bit +// float, then do a test conversion to a 32-bit float; if the result is the same numeric +// value, use the shorter value and repeat the process with a test conversion to a +// 16-bit float. (This rule selects 16-bit float for positive and negative Infinity +// as well.) Also, there are many representations for NaN. If NaN is an allowed value, +// it must always be represented as 0xf97e00." +func CanonicalEncOptions() EncOptions { + return EncOptions{ + Sort: SortCanonical, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// CTAP2EncOptions returns EncOptions for "CTAP2 Canonical CBOR" encoding, +// defined in CTAP specification, with the following rules: +// +// 1. "Integers must be encoded as small as possible." +// 2. "The representations of any floating-point values are not changed." +// 3. "The expression of lengths in major types 2 through 5 must be as short as possible." +// 4. "Indefinite-length items must be made into definite-length items."" +// 5. The keys in every map must be sorted in bytewise lexicographic order. +// See SortBytewiseLexical for details. +// 6. "Tags as defined in Section 2.4 in [RFC7049] MUST NOT be present." +func CTAP2EncOptions() EncOptions { + return EncOptions{ + Sort: SortCTAP2, + ShortestFloat: ShortestFloatNone, + NaNConvert: NaNConvertNone, + InfConvert: InfConvertNone, + IndefLength: IndefLengthForbidden, + TagsMd: TagsForbidden, + } +} + +// CoreDetEncOptions returns EncOptions for "Core Deterministic" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "Preferred serialization MUST be used. In particular, this means that arguments +// (see Section 3) for integers, lengths in major types 2 through 5, and tags MUST +// be as short as possible" +// "Floating point values also MUST use the shortest form that preserves the value" +// 2. "Indefinite-length items MUST NOT appear." +// 3. "The keys in every map MUST be sorted in the bytewise lexicographic order of +// their deterministic encodings." +func CoreDetEncOptions() EncOptions { + return EncOptions{ + Sort: SortCoreDeterministic, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + IndefLength: IndefLengthForbidden, + } +} + +// PreferredUnsortedEncOptions returns EncOptions for "Preferred Serialization" encoding, +// defined in RFC 7049bis with the following rules: +// +// 1. "The preferred serialization always uses the shortest form of representing the argument +// (Section 3);" +// 2. 
"it also uses the shortest floating-point encoding that preserves the value being +// encoded (see Section 5.5)." +// "The preferred encoding for a floating-point value is the shortest floating-point encoding +// that preserves its value, e.g., 0xf94580 for the number 5.5, and 0xfa45ad9c00 for the +// number 5555.5, unless the CBOR-based protocol specifically excludes the use of the shorter +// floating-point encodings. For NaN values, a shorter encoding is preferred if zero-padding +// the shorter significand towards the right reconstitutes the original NaN value (for many +// applications, the single NaN encoding 0xf97e00 will suffice)." +// 3. "Definite length encoding is preferred whenever the length is known at the time the +// serialization of the item starts." +func PreferredUnsortedEncOptions() EncOptions { + return EncOptions{ + Sort: SortNone, + ShortestFloat: ShortestFloat16, + NaNConvert: NaNConvert7e00, + InfConvert: InfConvertFloat16, + } +} + +// EncMode returns EncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) EncMode() (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// UserBufferEncMode returns UserBufferEncMode with immutable options and no tags (safe for concurrency). +func (opts EncOptions) UserBufferEncMode() (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.encMode() +} + +// EncModeWithTags returns EncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) EncModeWithTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithTags(tags) +} + +// UserBufferEncModeWithTags returns UserBufferEncMode with options and tags that are both immutable (safe for concurrency). +func (opts EncOptions) UserBufferEncModeWithTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + // Copy tags + ts := tagSet(make(map[reflect.Type]*tagItem)) + syncTags := tags.(*syncTagSet) + syncTags.RLock() + for contentType, tag := range syncTags.t { + if tag.opts.EncTag != EncTagNone { + ts[contentType] = tag + } + } + syncTags.RUnlock() + if len(ts) > 0 { + em.tags = ts + } + return em, nil +} + +// EncModeWithSharedTags returns EncMode with immutable options and mutable shared tags (safe for concurrency). +func (opts EncOptions) EncModeWithSharedTags(tags TagSet) (EncMode, error) { //nolint:gocritic // ignore hugeParam + return opts.UserBufferEncModeWithSharedTags(tags) +} + +// UserBufferEncModeWithSharedTags returns UserBufferEncMode with immutable options and mutable shared tags (safe for concurrency). 
+func (opts EncOptions) UserBufferEncModeWithSharedTags(tags TagSet) (UserBufferEncMode, error) { //nolint:gocritic // ignore hugeParam + if opts.TagsMd == TagsForbidden { + return nil, errors.New("cbor: cannot create EncMode with TagSet when TagsMd is TagsForbidden") + } + if tags == nil { + return nil, errors.New("cbor: cannot create EncMode with nil value as TagSet") + } + em, err := opts.encMode() + if err != nil { + return nil, err + } + em.tags = tags + return em, nil +} + +func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore hugeParam + if !opts.Sort.valid() { + return nil, errors.New("cbor: invalid SortMode " + strconv.Itoa(int(opts.Sort))) + } + if !opts.ShortestFloat.valid() { + return nil, errors.New("cbor: invalid ShortestFloatMode " + strconv.Itoa(int(opts.ShortestFloat))) + } + if !opts.NaNConvert.valid() { + return nil, errors.New("cbor: invalid NaNConvertMode " + strconv.Itoa(int(opts.NaNConvert))) + } + if !opts.InfConvert.valid() { + return nil, errors.New("cbor: invalid InfConvertMode " + strconv.Itoa(int(opts.InfConvert))) + } + if !opts.BigIntConvert.valid() { + return nil, errors.New("cbor: invalid BigIntConvertMode " + strconv.Itoa(int(opts.BigIntConvert))) + } + if !opts.Time.valid() { + return nil, errors.New("cbor: invalid TimeMode " + strconv.Itoa(int(opts.Time))) + } + if !opts.TimeTag.valid() { + return nil, errors.New("cbor: invalid TimeTag " + strconv.Itoa(int(opts.TimeTag))) + } + if !opts.IndefLength.valid() { + return nil, errors.New("cbor: invalid IndefLength " + strconv.Itoa(int(opts.IndefLength))) + } + if !opts.NilContainers.valid() { + return nil, errors.New("cbor: invalid NilContainers " + strconv.Itoa(int(opts.NilContainers))) + } + if !opts.TagsMd.valid() { + return nil, errors.New("cbor: invalid TagsMd " + strconv.Itoa(int(opts.TagsMd))) + } + if opts.TagsMd == TagsForbidden && opts.TimeTag == EncTagRequired { + return nil, errors.New("cbor: cannot set TagsMd to TagsForbidden when TimeTag is EncTagRequired") + } + if !opts.OmitEmpty.valid() { + return nil, errors.New("cbor: invalid OmitEmpty " + strconv.Itoa(int(opts.OmitEmpty))) + } + stringMajorType, err := opts.String.cborType() + if err != nil { + return nil, err + } + if !opts.FieldName.valid() { + return nil, errors.New("cbor: invalid FieldName " + strconv.Itoa(int(opts.FieldName))) + } + byteSliceLaterEncodingTag, err := opts.ByteSliceLaterFormat.encodingTag() + if err != nil { + return nil, err + } + if !opts.ByteArray.valid() { + return nil, errors.New("cbor: invalid ByteArray " + strconv.Itoa(int(opts.ByteArray))) + } + if !opts.BinaryMarshaler.valid() { + return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler))) + } + em := encMode{ + sort: opts.Sort, + shortestFloat: opts.ShortestFloat, + nanConvert: opts.NaNConvert, + infConvert: opts.InfConvert, + bigIntConvert: opts.BigIntConvert, + time: opts.Time, + timeTag: opts.TimeTag, + indefLength: opts.IndefLength, + nilContainers: opts.NilContainers, + tagsMd: opts.TagsMd, + omitEmpty: opts.OmitEmpty, + stringType: opts.String, + stringMajorType: stringMajorType, + fieldName: opts.FieldName, + byteSliceLaterFormat: opts.ByteSliceLaterFormat, + byteSliceLaterEncodingTag: byteSliceLaterEncodingTag, + byteArray: opts.ByteArray, + binaryMarshaler: opts.BinaryMarshaler, + } + return &em, nil +} + +// EncMode is the main interface for CBOR encoding. 
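+
+// Editor's note: illustrative caller-side sketch only; not part of the
+// vendored upstream source. An EncMode is typically created once (for example
+// from one of the deterministic presets above) and then shared across
+// goroutines:
+//
+//	var detMode, _ = cbor.CoreDetEncOptions().EncMode() // immutable, safe for concurrent use
+//
+//	func encodePayload(v interface{}) ([]byte, error) {
+//		return detMode.Marshal(v)
+//	}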
+type EncMode interface { + Marshal(v interface{}) ([]byte, error) + NewEncoder(w io.Writer) *Encoder + EncOptions() EncOptions +} + +// UserBufferEncMode is an interface for CBOR encoding, which extends EncMode by +// adding MarshalToBuffer to support user specified buffer rather than encoding +// into the built-in buffer pool. +type UserBufferEncMode interface { + EncMode + MarshalToBuffer(v interface{}, buf *bytes.Buffer) error + + // This private method is to prevent users implementing + // this interface and so future additions to it will + // not be breaking changes. + // See https://go.dev/blog/module-compatibility + unexport() +} + +type encMode struct { + tags tagProvider + sort SortMode + shortestFloat ShortestFloatMode + nanConvert NaNConvertMode + infConvert InfConvertMode + bigIntConvert BigIntConvertMode + time TimeMode + timeTag EncTagMode + indefLength IndefLengthMode + nilContainers NilContainersMode + tagsMd TagsMode + omitEmpty OmitEmptyMode + stringType StringMode + stringMajorType cborType + fieldName FieldNameMode + byteSliceLaterFormat ByteSliceLaterFormatMode + byteSliceLaterEncodingTag uint64 + byteArray ByteArrayMode + binaryMarshaler BinaryMarshalerMode +} + +var defaultEncMode, _ = EncOptions{}.encMode() + +// These four decoding modes are used by getMarshalerDecMode. +// maxNestedLevels, maxArrayElements, and maxMapPairs are +// set to max allowed limits to avoid rejecting Marshaler +// output that would have been the allowable output of a +// non-Marshaler object that exceeds default limits. +var ( + marshalerForbidIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsForbidden, + } + + marshalerAllowIndefLengthForbidTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsForbidden, + } + + marshalerForbidIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthForbidden, + tagsMd: TagsAllowed, + } + + marshalerAllowIndefLengthAllowTagsDecMode = decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: IndefLengthAllowed, + tagsMd: TagsAllowed, + } +) + +// getMarshalerDecMode returns one of four existing decoding modes +// which can be reused (safe for parallel use) for the purpose of +// checking if data returned by Marshaler is well-formed. +func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode { + switch { + case indefLength == IndefLengthAllowed && tagsMd == TagsAllowed: + return &marshalerAllowIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthAllowed && tagsMd == TagsForbidden: + return &marshalerAllowIndefLengthForbidTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsAllowed: + return &marshalerForbidIndefLengthAllowTagsDecMode + + case indefLength == IndefLengthForbidden && tagsMd == TagsForbidden: + return &marshalerForbidIndefLengthForbidTagsDecMode + + default: + // This should never happen, unless we add new options to + // IndefLengthMode or TagsMode without updating this function. 
+ return &decMode{ + maxNestedLevels: maxMaxNestedLevels, + maxArrayElements: maxMaxArrayElements, + maxMapPairs: maxMaxMapPairs, + indefLength: indefLength, + tagsMd: tagsMd, + } + } +} + +// EncOptions returns user specified options used to create this EncMode. +func (em *encMode) EncOptions() EncOptions { + return EncOptions{ + Sort: em.sort, + ShortestFloat: em.shortestFloat, + NaNConvert: em.nanConvert, + InfConvert: em.infConvert, + BigIntConvert: em.bigIntConvert, + Time: em.time, + TimeTag: em.timeTag, + IndefLength: em.indefLength, + NilContainers: em.nilContainers, + TagsMd: em.tagsMd, + OmitEmpty: em.omitEmpty, + String: em.stringType, + FieldName: em.fieldName, + ByteSliceLaterFormat: em.byteSliceLaterFormat, + ByteArray: em.byteArray, + BinaryMarshaler: em.binaryMarshaler, + } +} + +func (em *encMode) unexport() {} + +func (em *encMode) encTagBytes(t reflect.Type) []byte { + if em.tags != nil { + if tagItem := em.tags.getTagItemFromType(t); tagItem != nil { + return tagItem.cborTagNum + } + } + return nil +} + +// Marshal returns the CBOR encoding of v using em encoding mode. +// +// See the documentation for Marshal for details. +func (em *encMode) Marshal(v interface{}) ([]byte, error) { + e := getEncodeBuffer() + + if err := encode(e, em, reflect.ValueOf(v)); err != nil { + putEncodeBuffer(e) + return nil, err + } + + buf := make([]byte, e.Len()) + copy(buf, e.Bytes()) + + putEncodeBuffer(e) + return buf, nil +} + +// MarshalToBuffer encodes v into provided buffer (instead of using built-in buffer pool) +// and uses em encoding mode. +// +// NOTE: Unlike Marshal, the buffer provided to MarshalToBuffer can contain +// partially encoded data if error is returned. +// +// See Marshal for more details. +func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error { + if buf == nil { + return fmt.Errorf("cbor: encoding buffer provided by user is nil") + } + return encode(buf, em, reflect.ValueOf(v)) +} + +// NewEncoder returns a new encoder that writes to w using em EncMode. +func (em *encMode) NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, em: em} +} + +// encodeBufferPool caches unused bytes.Buffer objects for later reuse. 
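+
+// Editor's note: illustrative caller-side sketch only; not part of the
+// vendored upstream source. MarshalToBuffer (defined above) lets a caller
+// supply and reuse its own buffer instead of the pool below; myValue is a
+// hypothetical value to encode.
+//
+//	em, _ := cbor.EncOptions{}.UserBufferEncMode()
+//	var buf bytes.Buffer
+//	if err := em.MarshalToBuffer(myValue, &buf); err != nil {
+//		// buf may contain partially encoded data on error
+//	}
+//	// buf.Bytes() holds the encoding; call buf.Reset() before reusing buf.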
+var encodeBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(32) // TODO: make this configurable + return e + }, +} + +func getEncodeBuffer() *bytes.Buffer { + return encodeBufferPool.Get().(*bytes.Buffer) +} + +func putEncodeBuffer(e *bytes.Buffer) { + e.Reset() + encodeBufferPool.Put(e) +} + +type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error +type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error) + +func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if !v.IsValid() { + // v is zero value + e.Write(cborNil) + return nil + } + vt := v.Type() + f, _ := getEncodeFunc(vt) + if f == nil { + return &UnsupportedTypeError{vt} + } + + return f(e, em, v) +} + +func encodeBool(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + b := cborFalse + if v.Bool() { + b = cborTrue + } + e.Write(b) + return nil +} + +func encodeInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + i := v.Int() + if i >= 0 { + encodeHead(e, byte(cborTypePositiveInt), uint64(i)) + return nil + } + i = i*(-1) - 1 + encodeHead(e, byte(cborTypeNegativeInt), uint64(i)) + return nil +} + +func encodeUint(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypePositiveInt), v.Uint()) + return nil +} + +func encodeFloat(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + f64 := v.Float() + if math.IsNaN(f64) { + return encodeNaN(e, em, v) + } + if math.IsInf(f64, 0) { + return encodeInf(e, em, v) + } + fopt := em.shortestFloat + if v.Kind() == reflect.Float64 && (fopt == ShortestFloatNone || cannotFitFloat32(f64)) { + // Encode float64 + // Don't use encodeFloat64() because it cannot be inlined. + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | byte(additionalInformationAsFloat64) + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil + } + + f32 := float32(f64) + if fopt == ShortestFloat16 { + var f16 float16.Float16 + p := float16.PrecisionFromfloat32(f32) + if p == float16.PrecisionExact { + // Roundtrip float32->float16->float32 test isn't needed. + f16 = float16.Fromfloat32(f32) + } else if p == float16.PrecisionUnknown { + // Try roundtrip float32->float16->float32 to determine if float32 can fit into float16. + f16 = float16.Fromfloat32(f32) + if f16.Float32() == f32 { + p = float16.PrecisionExact + } + } + if p == float16.PrecisionExact { + // Encode float16 + // Don't use encodeFloat16() because it cannot be inlined. + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil + } + } + + // Encode float32 + // Don't use encodeFloat32() because it cannot be inlined. 
+ const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeInf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + f64 := v.Float() + switch em.infConvert { + case InfConvertReject: + return &UnsupportedValueError{msg: "floating-point infinity"} + + case InfConvertFloat16: + if f64 > 0 { + e.Write(cborPositiveInfinity) + } else { + e.Write(cborNegativeInfinity) + } + return nil + } + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, f64) + } + return encodeFloat32(e, float32(f64)) +} + +func encodeNaN(e *bytes.Buffer, em *encMode, v reflect.Value) error { + switch em.nanConvert { + case NaNConvert7e00: + e.Write(cborNaN) + return nil + + case NaNConvertNone: + if v.Kind() == reflect.Float64 { + return encodeFloat64(e, v.Float()) + } + f32 := float32NaNFromReflectValue(v) + return encodeFloat32(e, f32) + + case NaNConvertReject: + return &UnsupportedValueError{msg: "floating-point NaN"} + + default: // NaNConvertPreserveSignal, NaNConvertQuiet + if v.Kind() == reflect.Float64 { + f64 := v.Float() + f64bits := math.Float64bits(f64) + if em.nanConvert == NaNConvertQuiet && f64bits&(1<<51) == 0 { + f64bits |= 1 << 51 // Set quiet bit = 1 + f64 = math.Float64frombits(f64bits) + } + // The lower 29 bits are dropped when converting from float64 to float32. + if f64bits&0x1fffffff != 0 { + // Encode NaN as float64 because dropped coef bits from float64 to float32 are not all 0s. + return encodeFloat64(e, f64) + } + // Create float32 from float64 manually because float32(f64) always turns on NaN's quiet bits. + sign := uint32(f64bits>>32) & (1 << 31) + exp := uint32(0x7f800000) + coef := uint32((f64bits & 0xfffffffffffff) >> 29) + f32bits := sign | exp | coef + f32 := math.Float32frombits(f32bits) + // The lower 13 bits are dropped when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + // Encode NaN as float16 + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. + return encodeFloat16(e, f16) + } + + f32 := float32NaNFromReflectValue(v) + f32bits := math.Float32bits(f32) + if em.nanConvert == NaNConvertQuiet && f32bits&(1<<22) == 0 { + f32bits |= 1 << 22 // Set quiet bit = 1 + f32 = math.Float32frombits(f32bits) + } + // The lower 13 bits are dropped coef bits when converting from float32 to float16. + if f32bits&0x1fff != 0 { + // Encode NaN as float32 because dropped coef bits from float32 to float16 are not all 0s. + return encodeFloat32(e, f32) + } + f16, _ := float16.FromNaN32ps(f32) // Ignore err because it only returns error when f32 is not a NaN. 
+ return encodeFloat16(e, f16) + } +} + +func encodeFloat16(e *bytes.Buffer, f16 float16.Float16) error { + const argumentSize = 2 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat16 + binary.BigEndian.PutUint16(scratch[1:], uint16(f16)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat32(e *bytes.Buffer, f32 float32) error { + const argumentSize = 4 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat32 + binary.BigEndian.PutUint32(scratch[1:], math.Float32bits(f32)) + e.Write(scratch[:]) + return nil +} + +func encodeFloat64(e *bytes.Buffer, f64 float64) error { + const argumentSize = 8 + const headSize = 1 + argumentSize + var scratch [headSize]byte + scratch[0] = byte(cborTypePrimitives) | additionalInformationAsFloat64 + binary.BigEndian.PutUint64(scratch[1:], math.Float64bits(f64)) + e.Write(scratch[:]) + return nil +} + +func encodeByteString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + vk := v.Kind() + if vk == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if vk == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && em.byteSliceLaterEncodingTag != 0 { + encodeHead(e, byte(cborTypeTag), em.byteSliceLaterEncodingTag) + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + slen := v.Len() + if slen == 0 { + return e.WriteByte(byte(cborTypeByteString)) + } + encodeHead(e, byte(cborTypeByteString), uint64(slen)) + if vk == reflect.Array { + for i := 0; i < slen; i++ { + e.WriteByte(byte(v.Index(i).Uint())) + } + return nil + } + e.Write(v.Bytes()) + return nil +} + +func encodeString(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + s := v.String() + encodeHead(e, byte(em.stringMajorType), uint64(len(s))) + e.WriteString(s) + return nil +} + +type arrayEncodeFunc struct { + f encodeFunc +} + +func (ae arrayEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.byteArray == ByteArrayToByteSlice && v.Type().Elem().Kind() == reflect.Uint8 { + return encodeByteString(e, em, v) + } + if v.Kind() == reflect.Slice && v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + alen := v.Len() + if alen == 0 { + return e.WriteByte(byte(cborTypeArray)) + } + encodeHead(e, byte(cborTypeArray), uint64(alen)) + for i := 0; i < alen; i++ { + if err := ae.f(e, em, v.Index(i)); err != nil { + return err + } + } + return nil +} + +// encodeKeyValueFunc encodes key/value pairs in map (v). +// If kvs is provided (having the same length as v), length of encoded key and value are stored in kvs. +// kvs is used for canonical encoding of map. 
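+
+// Editor's note: illustrative caller-side sketch only; not part of the
+// vendored upstream source. The float encoders above implement the
+// ShortestFloat16 behavior quoted from RFC 8949 earlier in this file:
+//
+//	em, _ := cbor.EncOptions{ShortestFloat: cbor.ShortestFloat16}.EncMode()
+//	a, _ := em.Marshal(5.5)    // 0xf94580: the value fits float16 exactly
+//	b, _ := em.Marshal(5555.5) // 0xfa45ad9c00: float32 is the shortest exact form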
+type encodeKeyValueFunc func(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error + +type mapEncodeFunc struct { + e encodeKeyValueFunc +} + +func (me mapEncodeFunc) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() && em.nilContainers == NilContainerAsNull { + e.Write(cborNil) + return nil + } + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + mlen := v.Len() + if mlen == 0 { + return e.WriteByte(byte(cborTypeMap)) + } + + encodeHead(e, byte(cborTypeMap), uint64(mlen)) + if em.sort == SortNone || em.sort == SortFastShuffle || mlen <= 1 { + return me.e(e, em, v, nil) + } + + kvsp := getKeyValues(v.Len()) // for sorting keys + defer putKeyValues(kvsp) + kvs := *kvsp + + kvBeginOffset := e.Len() + if err := me.e(e, em, v, kvs); err != nil { + return err + } + kvTotalLen := e.Len() - kvBeginOffset + + // Use the capacity at the tail of the encode buffer as a staging area to rearrange the + // encoded pairs into sorted order. + e.Grow(kvTotalLen) + tmp := e.Bytes()[e.Len() : e.Len()+kvTotalLen] // Can use e.AvailableBuffer() in Go 1.21+. + dst := e.Bytes()[kvBeginOffset:] + + if em.sort == SortBytewiseLexical { + sort.Sort(&bytewiseKeyValueSorter{kvs: kvs, data: dst}) + } else { + sort.Sort(&lengthFirstKeyValueSorter{kvs: kvs, data: dst}) + } + + // This is where the encoded bytes are actually rearranged in the output buffer to reflect + // the desired order. + sortedOffset := 0 + for _, kv := range kvs { + copy(tmp[sortedOffset:], dst[kv.offset:kv.nextOffset]) + sortedOffset += kv.nextOffset - kv.offset + } + copy(dst, tmp[:kvTotalLen]) + + return nil + +} + +// keyValue is the position of an encoded pair in a buffer. All offsets are zero-based and relative +// to the first byte of the first encoded pair. +type keyValue struct { + offset int + valueOffset int + nextOffset int +} + +type bytewiseKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *bytewiseKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *bytewiseKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *bytewiseKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +type lengthFirstKeyValueSorter struct { + kvs []keyValue + data []byte +} + +func (x *lengthFirstKeyValueSorter) Len() int { + return len(x.kvs) +} + +func (x *lengthFirstKeyValueSorter) Swap(i, j int) { + x.kvs[i], x.kvs[j] = x.kvs[j], x.kvs[i] +} + +func (x *lengthFirstKeyValueSorter) Less(i, j int) bool { + kvi, kvj := x.kvs[i], x.kvs[j] + if keyLengthDifference := (kvi.valueOffset - kvi.offset) - (kvj.valueOffset - kvj.offset); keyLengthDifference != 0 { + return keyLengthDifference < 0 + } + return bytes.Compare(x.data[kvi.offset:kvi.valueOffset], x.data[kvj.offset:kvj.valueOffset]) <= 0 +} + +var keyValuePool = sync.Pool{} + +func getKeyValues(length int) *[]keyValue { + v := keyValuePool.Get() + if v == nil { + y := make([]keyValue, length) + return &y + } + x := v.(*[]keyValue) + if cap(*x) >= length { + *x = (*x)[:length] + return x + } + // []keyValue from the pool does not have enough capacity. + // Return it back to the pool and create a new one. 
+ keyValuePool.Put(x) + y := make([]keyValue, length) + return &y +} + +func putKeyValues(x *[]keyValue) { + *x = (*x)[:0] + keyValuePool.Put(x) +} + +func encodeStructToArray(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + flds := structType.fields + + encodeHead(e, byte(cborTypeArray), uint64(len(flds))) + for i := 0; i < len(flds); i++ { + f := flds[i] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Write CBOR nil for null pointer to embedded struct + e.Write(cborNil) + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + } + return nil +} + +func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return err + } + + flds := structType.getFields(em) + + start := 0 + if em.sort == SortFastShuffle && len(flds) > 0 { + start = rand.Intn(len(flds)) //nolint:gosec // Don't need a CSPRNG for deck cutting. + } + + if b := em.encTagBytes(v.Type()); b != nil { + e.Write(b) + } + + // Encode head with struct field count. + // Head is rewritten later if actual encoded field count is different from struct field count. + encodedHeadLen := encodeHead(e, byte(cborTypeMap), uint64(len(flds))) + + kvbegin := e.Len() + kvcount := 0 + for offset := 0; offset < len(flds); offset++ { + f := flds[(start+offset)%len(flds)] + + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + if f.omitEmpty { + empty, err := f.ief(em, fv) + if err != nil { + return err + } + if empty { + continue + } + } + + if !f.keyAsInt && em.fieldName == FieldNameToByteString { + e.Write(f.cborNameByteString) + } else { // int or text string + e.Write(f.cborName) + } + + if err := f.ef(e, em, fv); err != nil { + return err + } + + kvcount++ + } + + if len(flds) == kvcount { + // Encoded element count in head is the same as actual element count. + return nil + } + + // Overwrite the bytes that were reserved for the head before encoding the map entries. + var actualHeadLen int + { + headbuf := *bytes.NewBuffer(e.Bytes()[kvbegin-encodedHeadLen : kvbegin-encodedHeadLen : kvbegin]) + actualHeadLen = encodeHead(&headbuf, byte(cborTypeMap), uint64(kvcount)) + } + + if actualHeadLen == encodedHeadLen { + // The bytes reserved for the encoded head were exactly the right size, so the + // encoded entries are already in their final positions. + return nil + } + + // We reserved more bytes than needed for the encoded head, based on the number of fields + // encoded. The encoded entries are offset to the right by the number of excess reserved + // bytes. Shift the entries left to remove the gap. 
+ excessReservedBytes := encodedHeadLen - actualHeadLen + dst := e.Bytes()[kvbegin-excessReservedBytes : e.Len()-excessReservedBytes] + src := e.Bytes()[kvbegin:e.Len()] + copy(dst, src) + + // After shifting, the excess bytes are at the end of the output buffer and they are + // garbage. + e.Truncate(e.Len() - excessReservedBytes) + return nil +} + +func encodeIntf(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if v.IsNil() { + e.Write(cborNil) + return nil + } + return encode(e, em, v.Elem()) +} + +func encodeTime(e *bytes.Buffer, em *encMode, v reflect.Value) error { + t := v.Interface().(time.Time) + if t.IsZero() { + e.Write(cborNil) // Even if tag is required, encode as CBOR null. + return nil + } + if em.timeTag == EncTagRequired { + tagNumber := 1 + if em.time == TimeRFC3339 || em.time == TimeRFC3339Nano { + tagNumber = 0 + } + encodeHead(e, byte(cborTypeTag), uint64(tagNumber)) + } + switch em.time { + case TimeUnix: + secs := t.Unix() + return encodeInt(e, em, reflect.ValueOf(secs)) + + case TimeUnixMicro: + t = t.UTC().Round(time.Microsecond) + f := float64(t.UnixNano()) / 1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeUnixDynamic: + t = t.UTC().Round(time.Microsecond) + secs, nsecs := t.Unix(), uint64(t.Nanosecond()) + if nsecs == 0 { + return encodeInt(e, em, reflect.ValueOf(secs)) + } + f := float64(secs) + float64(nsecs)/1e9 + return encodeFloat(e, em, reflect.ValueOf(f)) + + case TimeRFC3339: + s := t.Format(time.RFC3339) + return encodeString(e, em, reflect.ValueOf(s)) + + default: // TimeRFC3339Nano + s := t.Format(time.RFC3339Nano) + return encodeString(e, em, reflect.ValueOf(s)) + } +} + +func encodeBigInt(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.bigIntConvert == BigIntConvertReject { + return &UnsupportedTypeError{Type: typeBigInt} + } + + vbi := v.Interface().(big.Int) + sign := vbi.Sign() + bi := new(big.Int).SetBytes(vbi.Bytes()) // bi is absolute value of v + if sign < 0 { + // For negative number, convert to CBOR encoded number (-v-1). 
+ bi.Sub(bi, big.NewInt(1)) + } + + if em.bigIntConvert == BigIntConvertShortest { + if bi.IsUint64() { + if sign >= 0 { + // Encode as CBOR pos int (major type 0) + encodeHead(e, byte(cborTypePositiveInt), bi.Uint64()) + return nil + } + // Encode as CBOR neg int (major type 1) + encodeHead(e, byte(cborTypeNegativeInt), bi.Uint64()) + return nil + } + } + + tagNum := 2 + if sign < 0 { + tagNum = 3 + } + // Write tag number + encodeHead(e, byte(cborTypeTag), uint64(tagNum)) + // Write bignum byte string + b := bi.Bytes() + encodeHead(e, byte(cborTypeByteString), uint64(len(b))) + e.Write(b) + return nil +} + +type binaryMarshalerEncoder struct { + alternateEncode encodeFunc + alternateIsEmpty isEmptyFunc +} + +func (bme binaryMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateEncode(e, em, v) + } + + vt := v.Type() + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(vt) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return err + } + if b := em.encTagBytes(vt); b != nil { + e.Write(b) + } + encodeHead(e, byte(cborTypeByteString), uint64(len(data))) + e.Write(data) + return nil +} + +func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) { + if em.binaryMarshaler != BinaryMarshalerByteString { + return bme.alternateIsEmpty(em, v) + } + + m, ok := v.Interface().(encoding.BinaryMarshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(encoding.BinaryMarshaler) + } + data, err := m.MarshalBinary() + if err != nil { + return false, err + } + return len(data) == 0, nil +} + +func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden && v.Type() == typeRawTag { + return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden") + } + m, ok := v.Interface().(Marshaler) + if !ok { + pv := reflect.New(v.Type()) + pv.Elem().Set(v) + m = pv.Interface().(Marshaler) + } + data, err := m.MarshalCBOR() + if err != nil { + return err + } + + // Verify returned CBOR data item from MarshalCBOR() is well-formed and passes tag validity for builtin tags 0-3. + d := decoder{data: data, dm: getMarshalerDecMode(em.indefLength, em.tagsMd)} + err = d.wellformed(false, true) + if err != nil { + return &MarshalerError{typ: v.Type(), err: err} + } + + e.Write(data) + return nil +} + +func encodeTag(e *bytes.Buffer, em *encMode, v reflect.Value) error { + if em.tagsMd == TagsForbidden { + return errors.New("cbor: cannot encode cbor.Tag when TagsMd is TagsForbidden") + } + + t := v.Interface().(Tag) + + if t.Number == 0 && t.Content == nil { + // Marshal uninitialized cbor.Tag + e.Write(cborNil) + return nil + } + + // Marshal tag number + encodeHead(e, byte(cborTypeTag), t.Number) + + vem := *em // shallow copy + + // For built-in tags, disable settings that may introduce tag validity errors when + // marshaling certain Content values. + switch t.Number { + case tagNumRFC3339Time: + vem.stringType = StringToTextString + vem.stringMajorType = cborTypeTextString + case tagNumUnsignedBignum, tagNumNegativeBignum: + vem.byteSliceLaterFormat = ByteSliceLaterFormatNone + vem.byteSliceLaterEncodingTag = 0 + } + + // Marshal tag content + return encode(e, &vem, reflect.ValueOf(t.Content)) +} + +// encodeHead writes CBOR head of specified type t and returns number of bytes written. 
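+
+// Editor's note (illustrative summary, not part of the vendored upstream
+// source): encodeHead below picks the argument width from n as follows:
+//
+//	n <= 23         -> 1 byte:  t | n
+//	n <= 0xff       -> 2 bytes: t | 24, then uint8(n)
+//	n <= 0xffff     -> 3 bytes: t | 25, then big-endian uint16(n)
+//	n <= 0xffffffff -> 5 bytes: t | 26, then big-endian uint32(n)
+//	otherwise       -> 9 bytes: t | 27, then big-endian uint64(n)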
+func encodeHead(e *bytes.Buffer, t byte, n uint64) int { + if n <= maxAdditionalInformationWithoutArgument { + const headSize = 1 + e.WriteByte(t | byte(n)) + return headSize + } + + if n <= math.MaxUint8 { + const headSize = 2 + scratch := [headSize]byte{ + t | byte(additionalInformationWith1ByteArgument), + byte(n), + } + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint16 { + const headSize = 3 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith2ByteArgument) + binary.BigEndian.PutUint16(scratch[1:], uint16(n)) + e.Write(scratch[:]) + return headSize + } + + if n <= math.MaxUint32 { + const headSize = 5 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith4ByteArgument) + binary.BigEndian.PutUint32(scratch[1:], uint32(n)) + e.Write(scratch[:]) + return headSize + } + + const headSize = 9 + var scratch [headSize]byte + scratch[0] = t | byte(additionalInformationWith8ByteArgument) + binary.BigEndian.PutUint64(scratch[1:], n) + e.Write(scratch[:]) + return headSize +} + +var ( + typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem() + typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + typeRawMessage = reflect.TypeOf(RawMessage(nil)) + typeByteString = reflect.TypeOf(ByteString("")) +) + +func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) { + k := t.Kind() + if k == reflect.Ptr { + return getEncodeIndirectValueFunc(t), isEmptyPtr + } + switch t { + case typeSimpleValue: + return encodeMarshalerType, isEmptyUint + + case typeTag: + return encodeTag, alwaysNotEmpty + + case typeTime: + return encodeTime, alwaysNotEmpty + + case typeBigInt: + return encodeBigInt, alwaysNotEmpty + + case typeRawMessage: + return encodeMarshalerType, isEmptySlice + + case typeByteString: + return encodeMarshalerType, isEmptyString + } + if reflect.PtrTo(t).Implements(typeMarshaler) { + return encodeMarshalerType, alwaysNotEmpty + } + if reflect.PtrTo(t).Implements(typeBinaryMarshaler) { + defer func() { + // capture encoding method used for modes that disable BinaryMarshaler + bme := binaryMarshalerEncoder{ + alternateEncode: ef, + alternateIsEmpty: ief, + } + ef = bme.encode + ief = bme.isEmpty + }() + } + switch k { + case reflect.Bool: + return encodeBool, isEmptyBool + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return encodeInt, isEmptyInt + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return encodeUint, isEmptyUint + + case reflect.Float32, reflect.Float64: + return encodeFloat, isEmptyFloat + + case reflect.String: + return encodeString, isEmptyString + + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteString, isEmptySlice + } + fallthrough + + case reflect.Array: + f, _ := getEncodeFunc(t.Elem()) + if f == nil { + return nil, nil + } + return arrayEncodeFunc{f: f}.encode, isEmptySlice + + case reflect.Map: + f := getEncodeMapFunc(t) + if f == nil { + return nil, nil + } + return f, isEmptyMap + + case reflect.Struct: + // Get struct's special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + if hasToArrayOption(tag) { + return encodeStructToArray, isEmptyStruct + } + } + } + return encodeStruct, isEmptyStruct + + case reflect.Interface: + return encodeIntf, isEmptyIntf + } + return nil, nil +} + +func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + f, _ := 
getEncodeFunc(t) + if f == nil { + return nil + } + return func(e *bytes.Buffer, em *encMode, v reflect.Value) error { + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + if v.Kind() == reflect.Ptr && v.IsNil() { + e.Write(cborNil) + return nil + } + return f(e, em, v) + } +} + +func alwaysNotEmpty(_ *encMode, _ reflect.Value) (empty bool, err error) { + return false, nil +} + +func isEmptyBool(_ *encMode, v reflect.Value) (bool, error) { + return !v.Bool(), nil +} + +func isEmptyInt(_ *encMode, v reflect.Value) (bool, error) { + return v.Int() == 0, nil +} + +func isEmptyUint(_ *encMode, v reflect.Value) (bool, error) { + return v.Uint() == 0, nil +} + +func isEmptyFloat(_ *encMode, v reflect.Value) (bool, error) { + return v.Float() == 0.0, nil +} + +func isEmptyString(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptySlice(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyMap(_ *encMode, v reflect.Value) (bool, error) { + return v.Len() == 0, nil +} + +func isEmptyPtr(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyIntf(_ *encMode, v reflect.Value) (bool, error) { + return v.IsNil(), nil +} + +func isEmptyStruct(em *encMode, v reflect.Value) (bool, error) { + structType, err := getEncodingStructType(v.Type()) + if err != nil { + return false, err + } + + if em.omitEmpty == OmitEmptyGoValue { + return false, nil + } + + if structType.toArray { + return len(structType.fields) == 0, nil + } + + if len(structType.fields) > len(structType.omitEmptyFieldsIdx) { + return false, nil + } + + for _, i := range structType.omitEmptyFieldsIdx { + f := structType.fields[i] + + // Get field value + var fv reflect.Value + if len(f.idx) == 1 { + fv = v.Field(f.idx[0]) + } else { + // Get embedded field value. No error is expected. + fv, _ = getFieldValue(v, f.idx, func(reflect.Value) (reflect.Value, error) { + // Skip null pointer to embedded struct + return reflect.Value{}, nil + }) + if !fv.IsValid() { + continue + } + } + + empty, err := f.ief(em, fv) + if err != nil { + return false, err + } + if !empty { + return false, nil + } + } + return true, nil +} + +func cannotFitFloat32(f64 float64) bool { + f32 := float32(f64) + return float64(f32) != f64 +} + +// float32NaNFromReflectValue extracts float32 NaN from reflect.Value while preserving NaN's quiet bit. +func float32NaNFromReflectValue(v reflect.Value) float32 { + // Keith Randall's workaround for issue https://github.com/golang/go/issues/36400 + p := reflect.New(v.Type()) + p.Elem().Set(v) + f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32) + return f32 +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map.go b/vendor/github.com/fxamacker/cbor/v2/encode_map.go new file mode 100644 index 00000000..8b4b4bbc --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map.go @@ -0,0 +1,94 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build go1.20 + +package cbor + +import ( + "bytes" + "reflect" + "sync" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc + kpool, vpool sync.Pool +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + iterk := me.kpool.Get().(*reflect.Value) + defer func() { + iterk.SetZero() + me.kpool.Put(iterk) + }() + iterv := me.vpool.Get().(*reflect.Value) + defer func() { + iterv.SetZero() + me.vpool.Put(iterv) + }() + + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + if err := me.kf(e, em, *iterk); err != nil { + return err + } + if err := me.ef(e, em, *iterv); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + iterk.SetIterKey(iter) + iterv.SetIterValue(iter) + + offset := e.Len() + if err := me.kf(e, em, *iterk); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, *iterv); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{ + kf: kf, + ef: ef, + kpool: sync.Pool{ + New: func() interface{} { + rk := reflect.New(t.Key()).Elem() + return &rk + }, + }, + vpool: sync.Pool{ + New: func() interface{} { + rv := reflect.New(t.Elem()).Elem() + return &rv + }, + }, + } + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go new file mode 100644 index 00000000..31c39336 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go @@ -0,0 +1,60 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. 
+ +//go:build !go1.20 + +package cbor + +import ( + "bytes" + "reflect" +) + +type mapKeyValueEncodeFunc struct { + kf, ef encodeFunc +} + +func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error { + if kvs == nil { + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + } + return nil + } + + initial := e.Len() + for i, iter := 0, v.MapRange(); iter.Next(); i++ { + offset := e.Len() + if err := me.kf(e, em, iter.Key()); err != nil { + return err + } + valueOffset := e.Len() + if err := me.ef(e, em, iter.Value()); err != nil { + return err + } + kvs[i] = keyValue{ + offset: offset - initial, + valueOffset: valueOffset - initial, + nextOffset: e.Len() - initial, + } + } + + return nil +} + +func getEncodeMapFunc(t reflect.Type) encodeFunc { + kf, _ := getEncodeFunc(t.Key()) + ef, _ := getEncodeFunc(t.Elem()) + if kf == nil || ef == nil { + return nil + } + mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef} + return mapEncodeFunc{ + e: mkv.encodeKeyValues, + }.encode +} diff --git a/vendor/github.com/fxamacker/cbor/v2/simplevalue.go b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go new file mode 100644 index 00000000..de175cee --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/simplevalue.go @@ -0,0 +1,69 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" +) + +// SimpleValue represents CBOR simple value. +// CBOR simple value is: +// - an extension point like CBOR tag. +// - a subset of CBOR major type 7 that isn't floating-point. +// - "identified by a number between 0 and 255, but distinct from that number itself". +// For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key. +// +// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined". +// Other CBOR simple values are currently unassigned/reserved by IANA. +type SimpleValue uint8 + +var ( + typeSimpleValue = reflect.TypeOf(SimpleValue(0)) +) + +// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7). +func (sv SimpleValue) MarshalCBOR() ([]byte, error) { + // RFC 8949 3.3. Floating-Point Numbers and Values with No Content says: + // "An encoder MUST NOT issue two-byte sequences that start with 0xf8 + // (major type 7, additional information 24) and continue with a byte + // less than 0x20 (32 decimal). Such sequences are not well-formed. + // (This implies that an encoder cannot encode false, true, null, or + // undefined in two-byte sequences and that only the one-byte variants + // of these are well-formed; more generally speaking, each simple value + // only has a single representation variant)." + + switch { + case sv <= maxSimpleValueInAdditionalInformation: + return []byte{byte(cborTypePrimitives) | byte(sv)}, nil + + case sv >= minSimpleValueIn1ByteArgument: + return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil + + default: + return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)} + } +} + +// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue. 
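+
+// Editor's note: illustrative caller-side sketch only; not part of the
+// vendored upstream source. SimpleValue round-trips simple values that have
+// no dedicated Go representation:
+//
+//	b, _ := cbor.Marshal(cbor.SimpleValue(16)) // 0xf0: simple value 16
+//	var sv cbor.SimpleValue
+//	_ = cbor.Unmarshal(b, &sv)                 // sv == 16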
+func (sv *SimpleValue) UnmarshalCBOR(data []byte) error { + if sv == nil { + return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer") + } + + d := decoder{data: data, dm: defaultDecMode} + + typ, ai, val := d.getHead() + + if typ != cborTypePrimitives { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"} + } + if ai > additionalInformationWith1ByteArgument { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"} + } + + // It is safe to cast val to uint8 here because + // - data is already verified to be well-formed CBOR simple value and + // - val is <= math.MaxUint8. + *sv = SimpleValue(val) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/stream.go b/vendor/github.com/fxamacker/cbor/v2/stream.go new file mode 100644 index 00000000..507ab6c1 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/stream.go @@ -0,0 +1,277 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "bytes" + "errors" + "io" + "reflect" +) + +// Decoder reads and decodes CBOR values from io.Reader. +type Decoder struct { + r io.Reader + d decoder + buf []byte + off int // next read offset in buf + bytesRead int +} + +// NewDecoder returns a new decoder that reads and decodes from r using +// the default decoding options. +func NewDecoder(r io.Reader) *Decoder { + return defaultDecMode.NewDecoder(r) +} + +// Decode reads CBOR value and decodes it into the value pointed to by v. +func (dec *Decoder) Decode(v interface{}) error { + _, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.d.reset(dec.buf[dec.off:]) + err = dec.d.value(v) + + // Increment dec.off even if decoding err is not nil because + // dec.d.off points to the next CBOR data item if current + // CBOR data item is valid but failed to be decoded into v. + // This allows next CBOR data item to be decoded in next + // call to this function. + dec.off += dec.d.off + dec.bytesRead += dec.d.off + + return err +} + +// Skip skips to the next CBOR data item (if there is any), +// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc. +func (dec *Decoder) Skip() error { + n, err := dec.readNext() + if err != nil { + // Return validation error or read error. + return err + } + + dec.off += n + dec.bytesRead += n + return nil +} + +// NumBytesRead returns the number of bytes read. +func (dec *Decoder) NumBytesRead() int { + return dec.bytesRead +} + +// Buffered returns a reader for data remaining in Decoder's buffer. +// Returned reader is valid until the next call to Decode or Skip. +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.buf[dec.off:]) +} + +// readNext() reads next CBOR data item from Reader to buffer. +// It returns the size of next CBOR data item. +// It also returns validation error or read error if any. +func (dec *Decoder) readNext() (int, error) { + var readErr error + var validErr error + + for { + // Process any unread data in dec.buf. + if dec.off < len(dec.buf) { + dec.d.reset(dec.buf[dec.off:]) + off := dec.off // Save offset before data validation + validErr = dec.d.wellformed(true, false) + dec.off = off // Restore offset + + if validErr == nil { + return dec.d.off, nil + } + + if validErr != io.ErrUnexpectedEOF { + return 0, validErr + } + + // Process last read error on io.ErrUnexpectedEOF. 
+ if readErr != nil { + if readErr == io.EOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + return 0, readErr + } + } + + // More data is needed and there was no read error. + var n int + for n == 0 { + n, readErr = dec.read() + if n == 0 && readErr != nil { + // No more data can be read and read error is encountered. + // At this point, validErr is either nil or io.ErrUnexpectedEOF. + if readErr == io.EOF { + if validErr == io.ErrUnexpectedEOF { + // current CBOR data item is incomplete. + return 0, io.ErrUnexpectedEOF + } + } + return 0, readErr + } + } + + // At this point, dec.buf contains new data from last read (n > 0). + } +} + +// read() reads data from Reader to buffer. +// It returns number of bytes read and any read error encountered. +// Postconditions: +// - dec.buf contains previously unread data and new data. +// - dec.off is 0. +func (dec *Decoder) read() (int, error) { + // Grow buf if needed. + const minRead = 512 + if cap(dec.buf)-len(dec.buf)+dec.off < minRead { + oldUnreadBuf := dec.buf[dec.off:] + dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead) + dec.overwriteBuf(oldUnreadBuf) + } + + // Copy unread data over read data and reset off to 0. + if dec.off > 0 { + dec.overwriteBuf(dec.buf[dec.off:]) + } + + // Read from reader and reslice buf. + n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)]) + dec.buf = dec.buf[0 : len(dec.buf)+n] + return n, err +} + +func (dec *Decoder) overwriteBuf(newBuf []byte) { + n := copy(dec.buf, newBuf) + dec.buf = dec.buf[:n] + dec.off = 0 +} + +// Encoder writes CBOR values to io.Writer. +type Encoder struct { + w io.Writer + em *encMode + indefTypes []cborType +} + +// NewEncoder returns a new encoder that writes to w using the default encoding options. +func NewEncoder(w io.Writer) *Encoder { + return defaultEncMode.NewEncoder(w) +} + +// Encode writes the CBOR encoding of v. +func (enc *Encoder) Encode(v interface{}) error { + if len(enc.indefTypes) > 0 && v != nil { + indefType := enc.indefTypes[len(enc.indefTypes)-1] + if indefType == cborTypeTextString { + k := reflect.TypeOf(v).Kind() + if k != reflect.String { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string") + } + } else if indefType == cborTypeByteString { + t := reflect.TypeOf(v) + k := t.Kind() + if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 { + return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string") + } + } + } + + buf := getEncodeBuffer() + + err := encode(buf, enc.em, reflect.ValueOf(v)) + if err == nil { + _, err = enc.w.Write(buf.Bytes()) + } + + putEncodeBuffer(buf) + return err +} + +// StartIndefiniteByteString starts byte string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteByteString() error { + return enc.startIndefinite(cborTypeByteString) +} + +// StartIndefiniteTextString starts text string encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes definite length text strings +// ("chunks") as one contiguous string until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteTextString() error { + return enc.startIndefinite(cborTypeTextString) +} + +// StartIndefiniteArray starts array encoding of indefinite length. 
+// Subsequent calls of (*Encoder).Encode() encodes elements of the array +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteArray() error { + return enc.startIndefinite(cborTypeArray) +} + +// StartIndefiniteMap starts array encoding of indefinite length. +// Subsequent calls of (*Encoder).Encode() encodes elements of the map +// until EndIndefinite is called. +func (enc *Encoder) StartIndefiniteMap() error { + return enc.startIndefinite(cborTypeMap) +} + +// EndIndefinite closes last opened indefinite length value. +func (enc *Encoder) EndIndefinite() error { + if len(enc.indefTypes) == 0 { + return errors.New("cbor: cannot encode \"break\" code outside indefinite length values") + } + _, err := enc.w.Write([]byte{cborBreakFlag}) + if err == nil { + enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1] + } + return err +} + +var cborIndefHeader = map[cborType][]byte{ + cborTypeByteString: {cborByteStringWithIndefiniteLengthHead}, + cborTypeTextString: {cborTextStringWithIndefiniteLengthHead}, + cborTypeArray: {cborArrayWithIndefiniteLengthHead}, + cborTypeMap: {cborMapWithIndefiniteLengthHead}, +} + +func (enc *Encoder) startIndefinite(typ cborType) error { + if enc.em.indefLength == IndefLengthForbidden { + return &IndefiniteLengthError{typ} + } + _, err := enc.w.Write(cborIndefHeader[typ]) + if err == nil { + enc.indefTypes = append(enc.indefTypes, typ) + } + return err +} + +// RawMessage is a raw encoded CBOR value. +type RawMessage []byte + +// MarshalCBOR returns m or CBOR nil if m is nil. +func (m RawMessage) MarshalCBOR() ([]byte, error) { + if len(m) == 0 { + return cborNil, nil + } + return m, nil +} + +// UnmarshalCBOR creates a copy of data and saves to *m. +func (m *RawMessage) UnmarshalCBOR(data []byte) error { + if m == nil { + return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer") + } + *m = append((*m)[0:0], data...) + return nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/structfields.go b/vendor/github.com/fxamacker/cbor/v2/structfields.go new file mode 100644 index 00000000..81228acf --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/structfields.go @@ -0,0 +1,260 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "reflect" + "sort" + "strings" +) + +type field struct { + name string + nameAsInt int64 // used to decoder to match field name with CBOR int + cborName []byte + cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3 + idx []int + typ reflect.Type + ef encodeFunc + ief isEmptyFunc + typInfo *typeInfo // used to decoder to reuse type info + tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields) + omitEmpty bool // used to skip empty field + keyAsInt bool // used to encode/decode field name as int +} + +type fields []*field + +// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth. 
+type indexFieldSorter struct { + fields fields +} + +func (x *indexFieldSorter) Len() int { + return len(x.fields) +} + +func (x *indexFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *indexFieldSorter) Less(i, j int) bool { + iIdx, jIdx := x.fields[i].idx, x.fields[j].idx + for k := 0; k < len(iIdx) && k < len(jIdx); k++ { + if iIdx[k] != jIdx[k] { + return iIdx[k] < jIdx[k] + } + } + return len(iIdx) <= len(jIdx) +} + +// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag. +type nameLevelAndTagFieldSorter struct { + fields fields +} + +func (x *nameLevelAndTagFieldSorter) Len() int { + return len(x.fields) +} + +func (x *nameLevelAndTagFieldSorter) Swap(i, j int) { + x.fields[i], x.fields[j] = x.fields[j], x.fields[i] +} + +func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool { + fi, fj := x.fields[i], x.fields[j] + if fi.name != fj.name { + return fi.name < fj.name + } + if len(fi.idx) != len(fj.idx) { + return len(fi.idx) < len(fj.idx) + } + if fi.tagged != fj.tagged { + return fi.tagged + } + return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters. +} + +// getFields returns visible fields of struct type t following visibility rules for JSON encoding. +func getFields(t reflect.Type) (flds fields, structOptions string) { + // Get special field "_" tag options + if f, ok := t.FieldByName("_"); ok { + tag := f.Tag.Get("cbor") + if tag != "-" { + structOptions = tag + } + } + + // nTypes contains next level anonymous fields' types and indexes + // (there can be multiple fields of the same type at the same level) + flds, nTypes := appendFields(t, nil, nil, nil) + + if len(nTypes) > 0 { + + var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes + vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels + + for len(nTypes) > 0 { + cTypes, nTypes = nTypes, nil + + for t, idx := range cTypes { + // If there are multiple anonymous fields of the same struct type at the same level, all are ignored. + if len(idx) > 1 { + continue + } + + // Anonymous field of the same type at deeper nested level is ignored. + if vTypes[t] { + continue + } + vTypes[t] = true + + flds, nTypes = appendFields(t, idx[0], flds, nTypes) + } + } + } + + sort.Sort(&nameLevelAndTagFieldSorter{flds}) + + // Keep visible fields. + j := 0 // index of next unique field + for i := 0; i < len(flds); { + name := flds[i].name + if i == len(flds)-1 || // last field + name != flds[i+1].name || // field i has unique field name + len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1 + (flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not + flds[j] = flds[i] + j++ + } + + // Skip fields with the same field name. + for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive + } + } + if j != len(flds) { + flds = flds[:j] + } + + // Sort fields by field index + sort.Sort(&indexFieldSorter{flds}) + + return flds, structOptions +} + +// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes . 
+func appendFields( + t reflect.Type, + idx []int, + flds fields, + nTypes map[reflect.Type][][]int, +) ( + _flds fields, + _nTypes map[reflect.Type][][]int, +) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + if !isFieldExportable(f, ft.Kind()) { + continue + } + + tag := f.Tag.Get("cbor") + if tag == "" { + tag = f.Tag.Get("json") + } + if tag == "-" { + continue + } + + tagged := tag != "" + + // Parse field tag options + var tagFieldName string + var omitempty, keyasint bool + for j := 0; tag != ""; j++ { + var token string + idx := strings.IndexByte(tag, ',') + if idx == -1 { + token, tag = tag, "" + } else { + token, tag = tag[:idx], tag[idx+1:] + } + if j == 0 { + tagFieldName = token + } else { + switch token { + case "omitempty": + omitempty = true + case "keyasint": + keyasint = true + } + } + } + + fieldName := tagFieldName + if tagFieldName == "" { + fieldName = f.Name + } + + fIdx := make([]int, len(idx)+1) + copy(fIdx, idx) + fIdx[len(fIdx)-1] = i + + if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" { + flds = append(flds, &field{ + name: fieldName, + idx: fIdx, + typ: f.Type, + omitEmpty: omitempty, + keyAsInt: keyasint, + tagged: tagged}) + } else { + if nTypes == nil { + nTypes = make(map[reflect.Type][][]int) + } + nTypes[ft] = append(nTypes[ft], fIdx) + } + } + + return flds, nTypes +} + +// isFieldExportable returns true if f is an exportable (regular or anonymous) field or +// a nonexportable anonymous field of struct type. +// Nonexportable anonymous field of struct type can contain exportable fields. +func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam + exportable := f.PkgPath == "" + return exportable || (f.Anonymous && fk == reflect.Struct) +} + +type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error) + +// getFieldValue returns field value of struct v by index. When encountering null pointer +// to anonymous (embedded) struct field, f is called with the last traversed field value. +func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) { + fv = v + for i, n := range idx { + fv = fv.Field(n) + + if i < len(idx)-1 { + if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct { + if fv.IsNil() { + // Null pointer to embedded struct field + fv, err = f(fv) + if err != nil || !fv.IsValid() { + return fv, err + } + } + fv = fv.Elem() + } + } + } + return fv, nil +} diff --git a/vendor/github.com/fxamacker/cbor/v2/tag.go b/vendor/github.com/fxamacker/cbor/v2/tag.go new file mode 100644 index 00000000..5c4d2b7a --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/tag.go @@ -0,0 +1,299 @@ +package cbor + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and +// unmarshaling of tag content is subject to any encode and decode options that would apply to +// enclosed data item if it were to appear outside of a tag. +type Tag struct { + Number uint64 + Content interface{} +} + +// RawTag represents CBOR tag data, including tag number and raw tag content. +// RawTag implements Unmarshaler and Marshaler interfaces. +type RawTag struct { + Number uint64 + Content RawMessage +} + +// UnmarshalCBOR sets *t with tag number and raw tag content copied from data. 
+func (t *RawTag) UnmarshalCBOR(data []byte) error { + if t == nil { + return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer") + } + + // Decoding CBOR null and undefined to cbor.RawTag is no-op. + if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) { + return nil + } + + d := decoder{data: data, dm: defaultDecMode} + + // Unmarshal tag number. + typ, _, num := d.getHead() + if typ != cborTypeTag { + return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()} + } + t.Number = num + + // Unmarshal tag content. + c := d.data[d.off:] + t.Content = make([]byte, len(c)) + copy(t.Content, c) + return nil +} + +// MarshalCBOR returns CBOR encoding of t. +func (t RawTag) MarshalCBOR() ([]byte, error) { + if t.Number == 0 && len(t.Content) == 0 { + // Marshal uninitialized cbor.RawTag + b := make([]byte, len(cborNil)) + copy(b, cborNil) + return b, nil + } + + e := getEncodeBuffer() + + encodeHead(e, byte(cborTypeTag), t.Number) + + content := t.Content + if len(content) == 0 { + content = cborNil + } + + buf := make([]byte, len(e.Bytes())+len(content)) + n := copy(buf, e.Bytes()) + copy(buf[n:], content) + + putEncodeBuffer(e) + return buf, nil +} + +// DecTagMode specifies how decoder handles tag number. +type DecTagMode int + +const ( + // DecTagIgnored makes decoder ignore tag number (skips if present). + DecTagIgnored DecTagMode = iota + + // DecTagOptional makes decoder verify tag number if it's present. + DecTagOptional + + // DecTagRequired makes decoder verify tag number and tag number must be present. + DecTagRequired + + maxDecTagMode +) + +func (dtm DecTagMode) valid() bool { + return dtm >= 0 && dtm < maxDecTagMode +} + +// EncTagMode specifies how encoder handles tag number. +type EncTagMode int + +const ( + // EncTagNone makes encoder not encode tag number. + EncTagNone EncTagMode = iota + + // EncTagRequired makes encoder encode tag number. + EncTagRequired + + maxEncTagMode +) + +func (etm EncTagMode) valid() bool { + return etm >= 0 && etm < maxEncTagMode +} + +// TagOptions specifies how encoder and decoder handle tag number. +type TagOptions struct { + DecTag DecTagMode + EncTag EncTagMode +} + +// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode +// to provide CBOR tag support. +type TagSet interface { + // Add adds given tag number(s), content type, and tag options to TagSet. + Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error + + // Remove removes given tag content type from TagSet. 
+ Remove(contentType reflect.Type) + + tagProvider +} + +type tagProvider interface { + getTagItemFromType(t reflect.Type) *tagItem + getTypeFromTagNum(num []uint64) reflect.Type +} + +type tagItem struct { + num []uint64 + cborTagNum []byte + contentType reflect.Type + opts TagOptions +} + +func (t *tagItem) equalTagNum(num []uint64) bool { + // Fast path to compare 1 tag number + if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] { + return true + } + + if len(t.num) != len(num) { + return false + } + + for i := 0; i < len(t.num); i++ { + if t.num[i] != num[i] { + return false + } + } + + return true +} + +type ( + tagSet map[reflect.Type]*tagItem + + syncTagSet struct { + sync.RWMutex + t tagSet + } +) + +func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem { + return t[typ] +} + +func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type { + for typ, tag := range t { + if tag.equalTagNum(num) { + return typ + } + } + return nil +} + +// NewTagSet returns TagSet (safe for concurrency). +func NewTagSet() TagSet { + return &syncTagSet{t: make(map[reflect.Type]*tagItem)} +} + +// Add adds given tag number(s), content type, and tag options to TagSet. +func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error { + if contentType == nil { + return errors.New("cbor: cannot add nil content type to TagSet") + } + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + tag, err := newTagItem(opts, contentType, num, nestedNum...) + if err != nil { + return err + } + t.Lock() + defer t.Unlock() + for typ, ti := range t.t { + if typ == contentType { + return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet") + } + if ti.equalTagNum(tag.num) { + return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num) + } + } + t.t[contentType] = tag + return nil +} + +// Remove removes given tag content type from TagSet. 
+func (t *syncTagSet) Remove(contentType reflect.Type) { + for contentType.Kind() == reflect.Ptr { + contentType = contentType.Elem() + } + t.Lock() + delete(t.t, contentType) + t.Unlock() +} + +func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem { + t.RLock() + ti := t.t[typ] + t.RUnlock() + return ti +} + +func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type { + t.RLock() + rt := t.t.getTypeFromTagNum(num) + t.RUnlock() + return rt +} + +func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) { + if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone { + return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet") + } + if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface { + return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String()) + } + if contentType == typeTime { + return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if contentType == typeBigInt { + return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically") + } + if contentType == typeTag { + return nil, errors.New("cbor: cannot add cbor.Tag to TagSet") + } + if contentType == typeRawTag { + return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet") + } + if num == 0 || num == 1 { + return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead") + } + if num == 2 || num == 3 { + return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically") + } + if num == tagNumSelfDescribedCBOR { + return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically") + } + + te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType} + te.num = append(te.num, nestedNum...) + + // Cache encoded tag numbers + e := getEncodeBuffer() + for _, n := range te.num { + encodeHead(e, byte(cborTypeTag), n) + } + te.cborTagNum = make([]byte, e.Len()) + copy(te.cborTagNum, e.Bytes()) + putEncodeBuffer(e) + + return &te, nil +} + +var ( + typeTag = reflect.TypeOf(Tag{}) + typeRawTag = reflect.TypeOf(RawTag{}) +) + +// WrongTagError describes mismatch between CBOR tag and registered tag. +type WrongTagError struct { + RegisteredType reflect.Type + RegisteredTagNum []uint64 + TagNum []uint64 +} + +func (e *WrongTagError) Error() string { + return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum) +} diff --git a/vendor/github.com/fxamacker/cbor/v2/valid.go b/vendor/github.com/fxamacker/cbor/v2/valid.go new file mode 100644 index 00000000..b40793b9 --- /dev/null +++ b/vendor/github.com/fxamacker/cbor/v2/valid.go @@ -0,0 +1,394 @@ +// Copyright (c) Faye Amacker. All rights reserved. +// Licensed under the MIT License. See LICENSE in the project root for license information. + +package cbor + +import ( + "encoding/binary" + "errors" + "io" + "math" + "strconv" + + "github.com/x448/float16" +) + +// SyntaxError is a description of a CBOR syntax error. +type SyntaxError struct { + msg string +} + +func (e *SyntaxError) Error() string { return e.msg } + +// SemanticError is a description of a CBOR semantic error. 
+type SemanticError struct { + msg string +} + +func (e *SemanticError) Error() string { return e.msg } + +// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags. +type MaxNestedLevelError struct { + maxNestedLevels int +} + +func (e *MaxNestedLevelError) Error() string { + return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels) +} + +// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays. +type MaxArrayElementsError struct { + maxArrayElements int +} + +func (e *MaxArrayElementsError) Error() string { + return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array" +} + +// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps. +type MaxMapPairsError struct { + maxMapPairs int +} + +func (e *MaxMapPairsError) Error() string { + return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map" +} + +// IndefiniteLengthError indicates found disallowed indefinite length items. +type IndefiniteLengthError struct { + t cborType +} + +func (e *IndefiniteLengthError) Error() string { + return "cbor: indefinite-length " + e.t.String() + " isn't allowed" +} + +// TagsMdError indicates found disallowed CBOR tags. +type TagsMdError struct { +} + +func (e *TagsMdError) Error() string { + return "cbor: CBOR tag isn't allowed" +} + +// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item. +type ExtraneousDataError struct { + numOfBytes int // number of bytes of extraneous data + index int // location of extraneous data +} + +func (e *ExtraneousDataError) Error() string { + return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index) +} + +// wellformed checks whether the CBOR data item is well-formed. +// allowExtraData indicates if extraneous data is allowed after the CBOR data item. +// - use allowExtraData = true when using Decoder.Decode() +// - use allowExtraData = false when using Unmarshal() +func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error { + if len(d.data) == d.off { + return io.EOF + } + _, err := d.wellformedInternal(0, checkBuiltinTags) + if err == nil { + if !allowExtraData && d.off != len(d.data) { + err = &ExtraneousDataError{len(d.data) - d.off, d.off} + } + } + return err +} + +// wellformedInternal checks data's well-formedness and returns max depth and error. 
+func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo + t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag() + if err != nil { + return 0, err + } + + switch t { + case cborTypeByteString, cborTypeTextString: + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteString(t, depth, checkBuiltinTags) + } + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow") + } + if len(d.data)-d.off < valInt { // valInt+off may overflow integer + return 0, io.ErrUnexpectedEOF + } + d.off += valInt + + case cborTypeArray, cborTypeMap: + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + + if indefiniteLength { + if d.dm.indefLength == IndefLengthForbidden { + return 0, &IndefiniteLengthError{t} + } + return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags) + } + + valInt := int(val) + if valInt < 0 { + // Detect integer overflow + return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow") + } + + if t == cborTypeArray { + if valInt > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if valInt > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + + count := 1 + if t == cborTypeMap { + count = 2 + } + maxDepth := depth + for j := 0; j < count; j++ { + for i := 0; i < valInt; i++ { + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt // Save max depth + } + } + } + depth = maxDepth + + case cborTypeTag: + if d.dm.tagsMd == TagsForbidden { + return 0, &TagsMdError{} + } + + tagNum := val + + // Scan nested tag numbers to avoid recursion. + for { + if len(d.data) == d.off { // Tag number must be followed by tag content. + return 0, io.ErrUnexpectedEOF + } + if checkBuiltinTags { + err = validBuiltinTag(tagNum, d.data[d.off]) + if err != nil { + return 0, err + } + } + if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) { + return 0, &UnacceptableDataItemError{ + CBORType: cborTypeTag.String(), + Message: "bignum", + } + } + if getType(d.data[d.off]) != cborTypeTag { + break + } + if _, _, tagNum, err = d.wellformedHead(); err != nil { + return 0, err + } + depth++ + if depth > d.dm.maxNestedLevels { + return 0, &MaxNestedLevelError{d.dm.maxNestedLevels} + } + } + // Check tag content. + return d.wellformedInternal(depth, checkBuiltinTags) + } + + return depth, nil +} + +// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + // Peek ahead to get next type and indefinite length status. 
+ nt, ai := parseInitialByte(d.data[d.off]) + if t != nt { + return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()} + } + if additionalInformation(ai).isIndefiniteLength() { + return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"} + } + if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + } + return depth, nil +} + +// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error. +func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) { + var err error + maxDepth := depth + i := 0 + for { + if len(d.data) == d.off { + return 0, io.ErrUnexpectedEOF + } + if isBreakFlag(d.data[d.off]) { + d.off++ + break + } + var dpt int + if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil { + return 0, err + } + if dpt > maxDepth { + maxDepth = dpt + } + i++ + if t == cborTypeArray { + if i > d.dm.maxArrayElements { + return 0, &MaxArrayElementsError{d.dm.maxArrayElements} + } + } else { + if i%2 == 0 && i/2 > d.dm.maxMapPairs { + return 0, &MaxMapPairsError{d.dm.maxMapPairs} + } + } + } + if t == cborTypeMap && i%2 == 1 { + return 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return maxDepth, nil +} + +func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() ( + t cborType, + ai byte, + val uint64, + indefiniteLength bool, + err error, +) { + t, ai, val, err = d.wellformedHead() + if err != nil { + return + } + indefiniteLength = additionalInformation(ai).isIndefiniteLength() + return +} + +func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) { + dataLen := len(d.data) - d.off + if dataLen == 0 { + return 0, 0, 0, io.ErrUnexpectedEOF + } + + t, ai = parseInitialByte(d.data[d.off]) + val = uint64(ai) + d.off++ + dataLen-- + + if ai <= maxAdditionalInformationWithoutArgument { + return t, ai, val, nil + } + + if ai == additionalInformationWith1ByteArgument { + const argumentSize = 1 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(d.data[d.off]) + d.off++ + if t == cborTypePrimitives && val < 32 { + return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()} + } + return t, ai, val, nil + } + + if ai == additionalInformationWith2ByteArgument { + const argumentSize = 2 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith4ByteArgument { + const argumentSize = 4 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize])) + d.off += argumentSize + if t == cborTypePrimitives { + if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if ai == additionalInformationWith8ByteArgument { + const argumentSize = 8 + if dataLen < argumentSize { + return 0, 0, 0, io.ErrUnexpectedEOF + } + val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize]) + d.off += argumentSize + if t == cborTypePrimitives { + if err 
:= d.acceptableFloat(math.Float64frombits(val)); err != nil { + return 0, 0, 0, err + } + } + return t, ai, val, nil + } + + if additionalInformation(ai).isIndefiniteLength() { + switch t { + case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag: + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} + case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite(). + return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"} + } + return t, ai, val, nil + } + + // ai == 28, 29, 30 + return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()} +} + +func (d *decoder) acceptableFloat(f float64) error { + switch { + case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point NaN", + } + case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0): + return &UnacceptableDataItemError{ + CBORType: cborTypePrimitives.String(), + Message: "floating-point infinity", + } + } + return nil +} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index a8c29bfb..7c7f0c69 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-logr/logr)](https://goreportcard.com/report/github.com/go-logr/logr) [![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging @@ -91,11 +92,12 @@ logr design but also left out some parts and changed others: | Adding a name to a logger | `WithName` | no API | | Modify verbosity of log entries in a call chain | `V` | no API | | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | The high-level slog API is explicitly meant to be one of many different APIs that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) -package. +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. ### Inspiration @@ -145,24 +147,24 @@ There are implementations for the following logging libraries: ## slog interoperability Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and -`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. `slogr` itself leaves that to the caller. +slog API. 
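A minimal sketch of the round trip described in the paragraph above, assuming Go 1.21+ and the logr v1.4 API vendored in this change (the handler choice and messages are illustrative only, not part of the upstream README):

    package main

    import (
        "log/slog"
        "os"

        "github.com/go-logr/logr"
    )

    func main() {
        // slog.Handler -> logr.Logger
        log := logr.FromSlogHandler(slog.NewJSONHandler(os.Stderr, nil))
        log.Info("from the logr API", "key", "value")

        // logr.Logger -> slog.Handler, wrapped again by slog.New
        slogger := slog.New(logr.ToSlogHandler(log))
        slogger.Info("from the slog API", "key", "value")
    }

Both calls end up at the same JSON handler; only the front-end API differs.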
-## Using a `logr.Sink` as backend for slog +### Using a `logr.LogSink` as backend for slog Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +implementing both the normal logr interface(s) and `SlogSink`. Because of a conflict in the parameters of the common `Enabled` method, it is [not possible to implement both slog.Handler and logr.Sink in the same type](https://github.com/golang/go/issues/59110). If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can convert back and forth without adding additional wrappers, with one exception: when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future log calls. Such an implementation should also support values that implement specific @@ -187,13 +189,13 @@ Not supporting slog has several drawbacks: These drawbacks are severe enough that applications using a mixture of slog and logr should switch to a different backend. -## Using a `slog.Handler` as backend for logr +### Using a `slog.Handler` as backend for logr Using a plain `slog.Handler` without support for logr works better than the other direction: - All logr verbosity levels can be mapped 1:1 to their corresponding slog level by negating them. -- Stack unwinding is done by the `slogr.SlogSink` and the resulting program +- Stack unwinding is done by the `SlogSink` and the resulting program counter is passed to the `slog.Handler`. - Names added via `Logger.WithName` are gathered and recorded in an additional attribute with `logger` as key and the names separated by slash as value. @@ -205,27 +207,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility with logr implementations without slog support is not important, then `slog.Valuer` is sufficient. -## Context support for slog +### Context support for slog Storing a logger in a `context.Context` is not supported by -slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this -to fill this gap: - - func HandlerFromContext(ctx context.Context) slog.Handler { - logger, err := logr.FromContext(ctx) - if err == nil { - return slogr.NewSlogHandler(logger) - } - return slog.Default().Handler() - } - - func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { - return logr.NewContext(ctx, slogr.NewLogr(handler)) - } - -The downside is that storing and retrieving a `slog.Handler` needs more -allocations compared to using a `logr.Logger`. Therefore the recommendation is -to use the `logr.Logger` API in code which uses contextual logging. +slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be +used to fill this gap. They store and retrieve a `slog.Logger` pointer +under the same context key that is also used by `NewContext` and +`FromContext` for `logr.Logger` value. + +When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will +automatically convert the `slog.Logger` to a +`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. + +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. 
This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. + +The downside is that switching back and forth needs more allocations. Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. + +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 00000000..de8bcc3a --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 00000000..f012f9a1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. 
+func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 00000000..065ef0b8 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. 
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 12e5807c..30568e76 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -100,6 +100,11 @@ type Options struct { // details, see docs for Go's time.Layout. TimestampFormat string + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + // Verbosity tells funcr which V logs to produce. Higher values enable // more logs. Info logs at or below this level will be written, while logs // above this level will be discarded. @@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { if opts.MaxLogDepth == 0 { opts.MaxLogDepth = defaultMaxLogDepth } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } f := Formatter{ outputFormat: outfmt, prefix: "", @@ -233,6 +242,8 @@ type Formatter struct { valuesStr string depth int opts *Options + groupName string // for slog groups + groups []groupDef } // outputFormat indicates which outputFormat to use. @@ -245,6 +256,13 @@ const ( outputJSON ) +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any @@ -252,63 +270,125 @@ type PseudoStruct []any func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { - buf.WriteByte('{') + buf.WriteByte('{') // for the whole record } + + // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape + f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { + + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, true) // escape user-provided keys + + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue + } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) + } + + if bodyStr != "" { if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } + buf.WriteByte(f.comma()) } + buf.WriteString(bodyStr) + } + + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record + } + + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. 
If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. +func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true + } + + continuing := false + if values != "" { + buf.WriteString(values) continuing = true - buf.WriteString(f.valuesStr) } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - if f.outputFormat == outputJSON { + + if needClosingBrace { buf.WriteByte('}') } + return buf.String() } -// flatten renders a list of key-value pairs into a buffer. If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } + copied := false for i := 0; i < len(kvList); i += 2 { k, ok := kvList[i].(string) if !ok { + if !copied { + newList := make([]any, len(kvList)) + copy(newList, kvList) + kvList = newList + copied = true + } k = f.nonStringKey(kvList[i]) kvList[i] = k } v := kvList[i+1] - if i > 0 || continuing { + if i > 0 { if f.outputFormat == outputJSON { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } else { // In theory the format could be something we don't understand. In // practice, we control it, so it won't be. 
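The LogInfoLevel option introduced near the top of this file's diff can be exercised as in the hedged sketch below; funcr.New and funcr.Options are the existing funcr API, while the "severity" key name is a hypothetical choice used only for illustration:

    package main

    import (
        "fmt"

        "github.com/go-logr/logr/funcr"
    )

    func main() {
        key := "severity" // hypothetical key; funcr defaults to "level" when LogInfoLevel is nil
        log := funcr.New(func(prefix, args string) {
            fmt.Println(prefix, args)
        }, funcr.Options{LogInfoLevel: &key})

        // The info level should now be rendered under "severity" instead of "level".
        log.Info("hello", "count", 1)
    }

Setting LogInfoLevel to a pointer to an empty string would suppress the level key entirely, per the option's documentation.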
@@ -316,24 +396,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } } - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } + buf.WriteString(f.quoted(k, escapeKeys)) + buf.WriteByte(f.colon()) buf.WriteString(f.pretty(v)) } return kvList } +func (f Formatter) quoted(str string, escape bool) string { + if escape { + return prettyString(str) + } + // this is faster + return `"` + str + `"` +} + +func (f Formatter) comma() byte { + if f.outputFormat == outputJSON { + return ',' + } + return ' ' +} + +func (f Formatter) colon() byte { + if f.outputFormat == outputJSON { + return ':' + } + return '=' +} + func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -407,12 +498,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } for i := 0; i < len(v); i += 2 { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } k, _ := v[i].(string) // sanitize() above means no need to check success // arbitrary keys might need escaping buf.WriteString(prettyString(k)) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -481,7 +572,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { continue } if printComma { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { @@ -492,10 +583,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { name = fld.Name } // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') + buf.WriteString(f.quoted(name, false)) + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -520,7 +609,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } e := v.Index(i) buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) @@ -534,7 +623,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { i := 0 for it.Next() { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } // If a map key supports TextMarshaler, use it. keystr := "" @@ -556,7 +645,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } } buf.WriteString(keystr) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) i++ } @@ -706,6 +795,24 @@ func (f Formatter) sanitize(kvList []any) []any { return kvList } +// startGroup opens a new group scope (basically a sub-struct), which locks all +// the current saved values and starts them anew. This is needed to satisfy +// slog. +func (f *Formatter) startGroup(name string) { + // Unnamed groups are just inlined. + if name == "" { + return + } + + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) + + // Start collecting new values. 
+ f.groupName = name + f.valuesStr = "" + f.values = nil +} + // Init configures this Formatter from runtime info, such as the call depth // imposed by logr itself. // Note that this receiver is a pointer, so depth can be saved. @@ -740,7 +847,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args if policy := f.opts.LogCaller; policy == All || policy == Info { args = append(args, "caller", f.caller()) } - args = append(args, "level", level, "msg", msg) + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) return prefix, f.render(args, kvList) } @@ -793,7 +903,7 @@ func (f *Formatter) AddValues(kvList []any) { // Pre-render values, so we don't have to do it on each Info/Error call. buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys + f.flatten(buf, vals, true) // escape user-provided keys f.valuesStr = buf.String() } diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go new file mode 100644 index 00000000..7bd84761 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go @@ -0,0 +1,105 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +var _ logr.SlogSink = &fnlogger{} + +const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink + +func (l fnlogger) Handle(_ context.Context, record slog.Record) error { + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, kvList) + return true + }) + + if record.Level >= slog.LevelError { + l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) + } + return nil +} + +func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, kvList) + } + l.AddValues(kvList) + return &l +} + +func (l fnlogger) WithGroup(name string) logr.SlogSink { + l.startGroup(name) + return &l +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, grpKVs) + } + if attr.Key == "" { + // slog says we have to inline these + kvList = append(kvList, grpKVs...) 
+ } else { + kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) + } + } else if attr.Key != "" { + kvList = append(kvList, attr.Key, attrVal.Any()) + } + + return kvList +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l fnlogger) levelFromSlog(level slog.Level) int { + result := -level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 2a5075a1..b4428e10 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -207,10 +207,6 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries // implementing LogSink, rather than end users. Passing a nil sink will create // a Logger which discards all log lines. @@ -410,45 +406,6 @@ func (l Logger) IsZero() bool { return l.sink == nil } -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - // RuntimeInfo holds information that the logr "core" library knows which // LogSinks might want to know. type RuntimeInfo struct { diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go new file mode 100644 index 00000000..82d1ba49 --- /dev/null +++ b/vendor/github.com/go-logr/logr/sloghandler.go @@ -0,0 +1,192 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, l.groupPrefix, kvList) + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(ToSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because FromSlogHandler needs +// access to the original sink without this adjustment. A second copy would +// work, but then WithAttrs would have to be called for both of them. +func (l *slogHandler) sinkWithCallDepth() LogSink { + if sink, ok := l.sink.(CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + clone := *l + if l.slogSink != nil { + clone.slogSink = l.slogSink.WithAttrs(attrs) + clone.sink = clone.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, l.groupPrefix, kvList) + } + clone.sink = l.sink.WithValues(kvList...) 
+ } + return &clone +} + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + if name == "" { + // slog says to inline empty groups + return l + } + clone := *l + if l.slogSink != nil { + clone.slogSink = l.slogSink.WithGroup(name) + clone.sink = clone.slogSink + } else { + clone.groupPrefix = addPrefix(clone.groupPrefix, name) + } + return &clone +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + prefix := groupPrefix + if attr.Key != "" { + prefix = addPrefix(groupPrefix, attr.Key) + } + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, prefix, grpKVs) + } + kvList = append(kvList, grpKVs...) + } else if attr.Key != "" { + kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) + } + + return kvList +} + +func addPrefix(prefix, name string) string { + if prefix == "" { + return name + } + if name == "" { + return prefix + } + return prefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original Logger had a V level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go new file mode 100644 index 00000000..28a83d02 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr.go @@ -0,0 +1,100 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +// FromSlogHandler returns a Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func FromSlogHandler(handler slog.Handler) Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return Discard() + } + return New(handler.sink).V(int(handler.levelBias)) + } + return New(&slogSink{handler: handler}) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. 
+// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the Logger: +// +// logger := +// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func ToSlogHandler(logger Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in FromSlogHandler +// and ToSlogHandler. +type SlogSink interface { + LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go new file mode 100644 index 00000000..4060fcbc --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogsink.go @@ -0,0 +1,120 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
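A hedged usage sketch for the slog interop introduced above (FromSlogHandler, ToSlogHandler, and the WithGroup support backed by funcr's new startGroup). It assumes Go 1.21+ and the logr API added by this diff; the level comments only restate the mapping documented in the code above.

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// slog.Handler -> logr.Logger: V(0) maps to slog.LevelInfo, V(4) to slog.LevelDebug.
	logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
	logger.Info("info message", "key", "value")
	logger.V(4).Info("debug message") // LevelDebug; suppressed by the default TextHandler level

	// logr.Logger -> slog.Logger: records at or above slog.LevelError are routed
	// to LogSink.Error, everything else to LogSink.Info with an adjusted level.
	sl := slog.New(logr.ToSlogHandler(logger))
	sl.WithGroup("request").Info("handled", "method", "GET")
}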
+*/ + +package logr + +import ( + "context" + "log/slog" + "runtime" + "time" +) + +var ( + _ LogSink = &slogSink{} + _ CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewFromLogHandler. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + _ = l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) LogSink { + if l.name != "" { + l.name += "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) 
+ attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.editorconfig rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitattributes rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md similarity index 95% rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md index 61d8ebff..2ce45dd4 100644 --- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md similarity index 88% rename from vendor/github.com/go-task/slim-sprig/README.md rename to vendor/github.com/go-task/slim-sprig/v3/README.md index 72579471..b5ab5642 100644 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -1,4 +1,4 @@ -# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with all functions that depend on external (non standard library) or crypto packages diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml index cdcfd223..8e6346bb 100644 --- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/crypto.go rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go diff --git a/vendor/github.com/go-task/slim-sprig/list.go 
b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/network.go rename to vendor/github.com/go-task/slim-sprig/v3/network.go diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/regex.go rename to vendor/github.com/go-task/slim-sprig/v3/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/url.go rename to vendor/github.com/go-task/slim-sprig/v3/url.go diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go index 6c16c255..c6f66f10 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -56,6 +56,7 @@ type Unmarshaler struct { // implement JSONPBMarshaler so that the custom format can be produced. // // The JSON unmarshaling must follow the JSON to proto specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go index 685c80a6..e9438a93 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -55,6 +55,7 @@ type Marshaler struct { // implement JSONPBUnmarshaler so that the custom format can be parsed. // // The JSON marshaling must follow the proto to JSON specification: +// // https://developers.google.com/protocol-buffers/docs/proto3#json // // Deprecated: Custom types should implement protobuf reflection instead. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 85f9f573..fdff3fdb 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -127,9 +127,10 @@ func Is(any *anypb.Any, m proto.Message) bool { // The allocated message is stored in the embedded proto.Message. // // Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. diff --git a/vendor/github.com/google/gnostic-models/LICENSE b/vendor/github.com/google/gnostic-models/LICENSE new file mode 100644 index 00000000..6b0b1270 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/google/gnostic-models/compiler/README.md b/vendor/github.com/google/gnostic-models/compiler/README.md new file mode 100644 index 00000000..ee9783d2 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/compiler/README.md @@ -0,0 +1,4 @@ +# Compiler support code + +This directory contains compiler support code used by Gnostic and Gnostic +extensions. diff --git a/vendor/github.com/google/gnostic-models/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go new file mode 100644 index 00000000..1bfe9612 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/compiler/context.go @@ -0,0 +1,49 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + yaml "gopkg.in/yaml.v3" +) + +// Context contains state of the compiler as it traverses a document. +type Context struct { + Parent *Context + Name string + Node *yaml.Node + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, node *yaml.Node, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + name := context.Name + if context.Parent != nil { + name = context.Parent.Description() + "." + name + } + return name +} diff --git a/vendor/github.com/google/gnostic-models/compiler/error.go b/vendor/github.com/google/gnostic-models/compiler/error.go new file mode 100644 index 00000000..6f40515d --- /dev/null +++ b/vendor/github.com/google/gnostic-models/compiler/error.go @@ -0,0 +1,70 @@ +// Copyright 2017 Google LLC. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import "fmt" + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +func (err *Error) locationDescription() string { + if err.Context.Node != nil { + return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description()) + } + return err.Context.Description() +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return err.Message + } + return err.locationDescription() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. +type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/google/gnostic-models/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go new file mode 100644 index 00000000..250c81e8 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go @@ -0,0 +1,86 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + yaml "gopkg.in/yaml.v3" + + extensions "github.com/google/gnostic-models/extensions" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// CallExtension calls a binary extension handler. 
+func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { + if context == nil || context.ExtensionHandlers == nil { + return false, nil, nil + } + handled = false + for _, handler := range *(context.ExtensionHandlers) { + response, err = handler.handle(in, extensionName) + if response == nil { + continue + } else { + handled = true + break + } + } + return handled, response, err +} + +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + yamlData, _ := yaml.Marshal(in) + request := &extensions.ExtensionHandlerRequest{ + CompilerVersion: &extensions.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + Wrapper: &extensions.Wrapper{ + Version: "unknown", // TODO: set this to the type/version of spec being parsed. + Yaml: string(yamlData), + ExtensionName: extensionName, + }, + } + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + if err != nil { + return nil, err + } + response := &extensions.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil || !response.Handled { + return nil, err + } + if len(response.Errors) != 0 { + return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/google/gnostic-models/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go new file mode 100644 index 00000000..975d65e8 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go @@ -0,0 +1,397 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "regexp" + "sort" + "strconv" + + "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/jsonschema" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a *yaml.Node if possible. +func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { + if in == nil { + return nil, false + } + return in, true +} + +// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. +func SortedKeysForMap(m *yaml.Node) []string { + keys := make([]string, 0) + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + keys = append(keys, m.Content[i].Value) + } + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yamlv2.MapSlice contains a specified key. 
+func MapHasKey(m *yaml.Node, key string) bool { + if m == nil { + return false + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return true + } + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m *yaml.Node, key string) *yaml.Node { + if m == nil { + return nil + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return m.Content[i+1] + } + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// SequenceNodeForNode returns a node if it is a SequenceNode. +func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { + if node.Kind != yaml.SequenceNode { + return nil, false + } + return node, true +} + +// BoolForScalarNode returns the bool value of a node. +func BoolForScalarNode(node *yaml.Node) (bool, bool) { + if node == nil { + return false, false + } + if node.Kind == yaml.DocumentNode { + return BoolForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return false, false + } + if node.Tag != "!!bool" { + return false, false + } + v, err := strconv.ParseBool(node.Value) + if err != nil { + return false, false + } + return v, true +} + +// IntForScalarNode returns the integer value of a node. +func IntForScalarNode(node *yaml.Node) (int64, bool) { + if node == nil { + return 0, false + } + if node.Kind == yaml.DocumentNode { + return IntForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0, false + } + if node.Tag != "!!int" { + return 0, false + } + v, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +// FloatForScalarNode returns the float value of a node. +func FloatForScalarNode(node *yaml.Node) (float64, bool) { + if node == nil { + return 0.0, false + } + if node.Kind == yaml.DocumentNode { + return FloatForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0.0, false + } + if (node.Tag != "!!int") && (node.Tag != "!!float") { + return 0.0, false + } + v, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return 0.0, false + } + return v, true +} + +// StringForScalarNode returns the string value of a node. +func StringForScalarNode(node *yaml.Node) (string, bool) { + if node == nil { + return "", false + } + if node.Kind == yaml.DocumentNode { + return StringForScalarNode(node.Content[0]) + } + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!int": + return node.Value, true + case "!!str": + return node.Value, true + case "!!timestamp": + return node.Value, true + case "!!null": + return "", true + default: + return "", false + } + default: + return "", false + } +} + +// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. 
+func StringArrayForSequenceNode(node *yaml.Node) []string { + stringArray := make([]string, 0) + for _, item := range node.Content { + v, ok := StringForScalarNode(item) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + if m == nil || m.Kind != yaml.MappingNode { + return invalidKeys + } + for i := 0; i < len(m.Content); i += 2 { + key := m.Content[i].Value + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + return invalidKeys +} + +// NewNullNode creates a new Null node. +func NewNullNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!null", + } + return node +} + +// NewMappingNode creates a new Mapping node. +func NewMappingNode() *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: make([]*yaml.Node, 0), + } +} + +// NewSequenceNode creates a new Sequence node. +func NewSequenceNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + return node +} + +// NewScalarNodeForString creates a new node to hold a string. +func NewScalarNodeForString(s string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: s, + } +} + +// NewSequenceNodeForStringArray creates a new node to hold an array of strings. +func NewSequenceNodeForStringArray(strings []string) *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + for _, s := range strings { + node.Content = append(node.Content, NewScalarNodeForString(s)) + } + return node +} + +// NewScalarNodeForBool creates a new node to hold a bool. +func NewScalarNodeForBool(b bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", b), + } +} + +// NewScalarNodeForFloat creates a new node to hold a float. +func NewScalarNodeForFloat(f float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%g", f), + } +} + +// NewScalarNodeForInt creates a new node to hold an integer. +func NewScalarNodeForInt(i int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", i), + } +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. 
+func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. +func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} + +// Description returns a human-readable represention of an item. +func Description(item interface{}) string { + value, ok := item.(*yaml.Node) + if ok { + return jsonschema.Render(value) + } + return fmt.Sprintf("%+v", item) +} + +// Display returns a description of a node for use in error messages. +func Display(node *yaml.Node) string { + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!str": + return fmt.Sprintf("%s (string)", node.Value) + } + } + return fmt.Sprintf("%+v (%T)", node, node) +} + +// Marshal creates a yaml version of a structure in our preferred style +func Marshal(in *yaml.Node) []byte { + clearStyle(in) + //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) + bytes, _ := yaml.Marshal(in) + + return bytes +} + +func clearStyle(node *yaml.Node) { + node.Style = 0 + for _, c := range node.Content { + clearStyle(c) + } +} diff --git a/vendor/github.com/google/gnostic-models/compiler/main.go b/vendor/github.com/google/gnostic-models/compiler/main.go new file mode 100644 index 00000000..ce9fcc45 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/google/gnostic-models/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go new file mode 100644 index 00000000..be0e8b40 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/compiler/reader.go @@ -0,0 +1,307 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
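Stepping back to compiler/helpers.go above: a hedged sketch of how its yaml.Node constructors and lookup helpers are typically combined. All functions used appear in the vendored file above; the "title" key and value are only illustrative.

package main

import (
	"fmt"

	"github.com/google/gnostic-models/compiler"
)

func main() {
	// Build a small mapping node with the New* constructors from helpers.go.
	m := compiler.NewMappingNode()
	m.Content = append(m.Content,
		compiler.NewScalarNodeForString("title"),
		compiler.NewScalarNodeForString("Example API"),
	)

	// Query it with the lookup helpers.
	if compiler.MapHasKey(m, "title") {
		fmt.Println("title:", compiler.MapValueForKey(m, "title").Value)
	}

	// Marshal renders the node in the package's preferred (style-cleared) form.
	fmt.Print(string(compiler.Marshal(m)))
}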
+ +package compiler + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" + "sync" + + yaml "gopkg.in/yaml.v3" +) + +var verboseReader = false + +var fileCache map[string][]byte +var infoCache map[string]*yaml.Node + +var fileCacheEnable = true +var infoCacheEnable = true + +// These locks are used to synchronize accesses to the fileCache and infoCache +// maps (above). They are global state and can throw thread-related errors +// when modified from separate goroutines. The general strategy is to protect +// all public functions in this file with mutex Lock() calls. As a result, to +// avoid deadlock, these public functions should not call other public +// functions, so some public functions have private equivalents. +// In the future, we might consider replacing the maps with sync.Map and +// eliminating these mutexes. +var fileCacheMutex sync.Mutex +var infoCacheMutex sync.Mutex + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]*yaml.Node, 0) + } +} + +// EnableFileCache turns on file caching. +func EnableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCacheEnable = true +} + +// EnableInfoCache turns on parsed info caching. +func EnableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCacheEnable = true +} + +// DisableFileCache turns off file caching. +func DisableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCacheEnable = false +} + +// DisableInfoCache turns off parsed info caching. +func DisableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCacheEnable = false +} + +// RemoveFromFileCache removes an entry from the file cache. +func RemoveFromFileCache(fileurl string) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + if !fileCacheEnable { + return + } + initializeFileCache() + delete(fileCache, fileurl) +} + +// RemoveFromInfoCache removes an entry from the info cache. +func RemoveFromInfoCache(filename string) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + if !infoCacheEnable { + return + } + initializeInfoCache() + delete(infoCache, filename) +} + +// GetInfoCache returns the info cache map. +func GetInfoCache() map[string]*yaml.Node { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + if infoCache == nil { + initializeInfoCache() + } + return infoCache +} + +// ClearFileCache clears the file cache. +func ClearFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCache = make(map[string][]byte, 0) +} + +// ClearInfoCache clears the info cache. +func ClearInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCache = make(map[string]*yaml.Node) +} + +// ClearCaches clears all caches. +func ClearCaches() { + ClearFileCache() + ClearInfoCache() +} + +// FetchFile gets a specified file from the local filesystem or a remote location. 
+func FetchFile(fileurl string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return fetchFile(fileurl) +} + +func fetchFile(fileurl string) ([]byte, error) { + var bytes []byte + initializeFileCache() + if fileCacheEnable { + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) + } + } + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode != 200 { + return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) + } + bytes, err = ioutil.ReadAll(response.Body) + if fileCacheEnable && err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return readBytesForFile(filename) +} + +func readBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := fetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a *yaml.Node. +func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + return readInfoFromBytes(filename, bytes) +} + +func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + initializeInfoCache() + if infoCacheEnable { + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + } + var info yaml.Node + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + if infoCacheEnable && len(filename) > 0 { + infoCache[filename] = &info + } + return &info, nil +} + +// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
+func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + initializeInfoCache() + if infoCacheEnable { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + } + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = parts[0] + if _, err := url.ParseRequestURI(parts[0]); err != nil { + // It is not an URL, so the file is local + filename = basedir + parts[0] + } + } else { + filename = basefile + } + bytes, err := readBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := readInfoFromBytes(filename, bytes) + if info != nil && info.Kind == yaml.DocumentNode { + info = info.Content[0] + } + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if info == nil { + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m := info + if true { + found := false + for i := 0; i < len(m.Content); i += 2 { + if m.Content[i].Value == key { + info = m.Content[i+1] + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + if infoCacheEnable { + infoCache[ref] = info + } + return info, nil +} diff --git a/vendor/github.com/google/gnostic-models/extensions/README.md b/vendor/github.com/google/gnostic-models/extensions/README.md new file mode 100644 index 00000000..4b5d63e5 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/extensions/README.md @@ -0,0 +1,13 @@ +# Extensions + +**Extension Support is experimental.** + +This directory contains support code for building Gnostic extension handlers and +associated examples. + +Extension handlers can be used to compile vendor or specification extensions +into protocol buffer structures. + +Like plugins, extension handlers are built as separate executables. Extension +bodies are written to extension handlers as serialized +ExtensionHandlerRequests. diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go new file mode 100644 index 00000000..a71df8ab --- /dev/null +++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go @@ -0,0 +1,461 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.19.3 +// source: extensions/extension.proto + +package gnostic_extension_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The version number of Gnostic. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil { + return x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil { + return x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil { + return x.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension to process. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` + // The version number of Gnostic. 
+ CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` +} + +func (x *ExtensionHandlerRequest) Reset() { + *x = ExtensionHandlerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionHandlerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionHandlerRequest) ProtoMessage() {} + +func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if x != nil { + return x.Wrapper + } + return nil +} + +func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` + // Error message(s). If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` + // text output + Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ExtensionHandlerResponse) Reset() { + *x = ExtensionHandlerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionHandlerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionHandlerResponse) ProtoMessage() {} + +func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. 
+func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{2} +} + +func (x *ExtensionHandlerResponse) GetHandled() bool { + if x != nil { + return x.Handled + } + return false +} + +func (x *ExtensionHandlerResponse) GetErrors() []string { + if x != nil { + return x.Errors + } + return nil +} + +func (x *ExtensionHandlerResponse) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +type Wrapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // version of the OpenAPI specification in which this extension was written. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the extension. + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` + // YAML-formatted extension value. + Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` +} + +func (x *Wrapper) Reset() { + *x = Wrapper{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Wrapper) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Wrapper) ProtoMessage() {} + +func (x *Wrapper) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. +func (*Wrapper) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{3} +} + +func (x *Wrapper) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Wrapper) GetExtensionName() string { + if x != nil { + return x.ExtensionName + } + return "" +} + +func (x *Wrapper) GetYaml() string { + if x != nil { + return x.Yaml + } + return "" +} + +var File_extensions_extension_proto protoreflect.FileDescriptor + +var file_extensions_extension_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x37, + 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, + 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_extensions_extension_proto_rawDescOnce sync.Once + file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc +) + +func file_extensions_extension_proto_rawDescGZIP() []byte { + file_extensions_extension_proto_rawDescOnce.Do(func() { + file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) + }) + return file_extensions_extension_proto_rawDescData +} + +var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_extensions_extension_proto_goTypes = []interface{}{ + (*Version)(nil), // 0: gnostic.extension.v1.Version + (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest + (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse + (*Wrapper)(nil), // 3: 
gnostic.extension.v1.Wrapper + (*anypb.Any)(nil), // 4: google.protobuf.Any +} +var file_extensions_extension_proto_depIdxs = []int32{ + 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper + 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version + 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_extensions_extension_proto_init() } +func file_extensions_extension_proto_init() { + if File_extensions_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Wrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_extensions_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_extensions_extension_proto_goTypes, + DependencyIndexes: file_extensions_extension_proto_depIdxs, + MessageInfos: file_extensions_extension_proto_msgTypes, + }.Build() + File_extensions_extension_proto = out.File + file_extensions_extension_proto_rawDesc = nil + file_extensions_extension_proto_goTypes = nil + file_extensions_extension_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.proto b/vendor/github.com/google/gnostic-models/extensions/extension.proto new file mode 100644 index 00000000..875137c1 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/extensions/extension.proto @@ -0,0 +1,97 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package gnostic.extension.v1; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "GnosticExtension"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.gnostic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +// "Gnostic Extension" +option objc_class_prefix = "GNX"; + +// The Go package name. +option go_package = "./extensions;gnostic_extension_v1"; + +// The version number of Gnostic. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The extension to process. + Wrapper wrapper = 1; + + // The version number of Gnostic. + Version compiler_version = 2; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message(s). If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + repeated string errors = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension. + string extension_name = 2; + + // YAML-formatted extension value. + string yaml = 3; +} diff --git a/vendor/github.com/google/gnostic-models/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go new file mode 100644 index 00000000..ec8afd00 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/extensions/extensions.go @@ -0,0 +1,64 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gnostic_extension_v1 + +import ( + "io/ioutil" + "log" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +// Main implements the main program of an extension handler. +func Main(handler extensionHandler) { + // unpack the request + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + log.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + log.Println("Input error:", err.Error()) + os.Exit(1) + } + // call the handler + handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) + // respond with the output of the handler + response := &ExtensionHandlerResponse{ + Handled: false, // default assumption + Errors: make([]string, 0), + } + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } else if handled { + response.Handled = true + response.Value, err = ptypes.MarshalAny(output) + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } + } + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/google/gnostic-models/jsonschema/README.md b/vendor/github.com/google/gnostic-models/jsonschema/README.md new file mode 100644 index 00000000..6793c517 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/README.md @@ -0,0 +1,4 @@ +# jsonschema + +This directory contains code for reading, writing, and manipulating JSON +schemas. diff --git a/vendor/github.com/google/gnostic-models/jsonschema/base.go b/vendor/github.com/google/gnostic-models/jsonschema/base.go new file mode 100644 index 00000000..5fcc4885 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/base.go @@ -0,0 +1,97 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. 
+ +package jsonschema + +import ( + "encoding/base64" +) + +func baseSchemaBytes() ([]byte, error){ + return base64.StdEncoding.DecodeString( +`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi +JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl +c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK +ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg +ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp +bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp +dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl +ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK +ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v +bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg +ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki +LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p +bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s +CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog +ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg +ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog +ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg +ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog +ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 +IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog +ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 +ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl +ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw +ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg +ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg +ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg +ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg +IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 +aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg +ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg +ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg +ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK +ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi +ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP +ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy +ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg +ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv +ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi +OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl +SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
+dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k +ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk +cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl +cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh +ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg +ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk +ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk +ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 +IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi +b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 +LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl +cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv +bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog +ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq +ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg +ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg +ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg +ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg +ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu +aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh +bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl +cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs +CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs +ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg +ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg +ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy +YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 +IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg +fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 +IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} diff --git a/vendor/github.com/google/gnostic-models/jsonschema/display.go b/vendor/github.com/google/gnostic-models/jsonschema/display.go new file mode 100644 index 00000000..028a760a --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/display.go @@ -0,0 +1,229 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonschema + +import ( + "fmt" + "strings" +) + +// +// DISPLAY +// The following methods display Schemas. +// + +// Description returns a string representation of a string or string array. +func (s *StringOrStringArray) Description() string { + if s.String != nil { + return *s.String + } + if s.StringArray != nil { + return strings.Join(*s.StringArray, ", ") + } + return "" +} + +// Returns a string representation of a Schema. +func (schema *Schema) String() string { + return schema.describeSchema("") +} + +// Helper: Returns a string representation of a Schema indented by a specified string. +func (schema *Schema) describeSchema(indent string) string { + result := "" + if schema.Schema != nil { + result += indent + "$schema: " + *(schema.Schema) + "\n" + } + if schema.ID != nil { + result += indent + "id: " + *(schema.ID) + "\n" + } + if schema.MultipleOf != nil { + result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) + } + if schema.Maximum != nil { + result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) + } + if schema.ExclusiveMaximum != nil { + result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) + } + if schema.ExclusiveMinimum != nil { + result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) + } + if schema.MinLength != nil { + result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) + } + if schema.Pattern != nil { + result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) + } + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + result += indent + "additionalItems:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalItems.Boolean) + result += indent + fmt.Sprintf("additionalItems: %+v\n", b) + } + } + if schema.Items != nil { + result += indent + "items:\n" + items := schema.Items + if items.SchemaArray != nil { + for i, s := range *(items.SchemaArray) { + result += indent + " " + fmt.Sprintf("%d", i) + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } else if items.Schema != nil { + result += items.Schema.describeSchema(indent + " " + " ") + } + } + if schema.MaxItems != nil { + result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) + } + if schema.MinItems != nil { + result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) + } + if schema.UniqueItems != nil { + result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) + } + if schema.MaxProperties != nil { + result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) + } + if schema.MinProperties != nil { + result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) + } + if schema.Required != nil { + result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) + } + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + result += indent + "additionalProperties:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalProperties.Boolean) + result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) + } + } + if schema.Properties != nil { + result += indent + "properties:\n" + for _, pair := range *(schema.Properties) { + name := pair.Name 
+ s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.PatternProperties != nil { + result += indent + "patternProperties:\n" + for _, pair := range *(schema.PatternProperties) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Dependencies != nil { + result += indent + "dependencies:\n" + for _, pair := range *(schema.Dependencies) { + name := pair.Name + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } else { + a := schemaOrStringArray.StringArray + if a != nil { + result += indent + " " + name + ":\n" + for _, s2 := range *a { + result += indent + " " + " " + s2 + "\n" + } + } + } + + } + } + if schema.Enumeration != nil { + result += indent + "enumeration:\n" + for _, value := range *(schema.Enumeration) { + if value.String != nil { + result += indent + " " + fmt.Sprintf("%+v\n", *value.String) + } else { + result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) + } + } + } + if schema.Type != nil { + result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) + } + if schema.AllOf != nil { + result += indent + "allOf:\n" + for _, s := range *(schema.AllOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.AnyOf != nil { + result += indent + "anyOf:\n" + for _, s := range *(schema.AnyOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.OneOf != nil { + result += indent + "oneOf:\n" + for _, s := range *(schema.OneOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.Not != nil { + result += indent + "not:\n" + result += schema.Not.describeSchema(indent + " ") + } + if schema.Definitions != nil { + result += indent + "definitions:\n" + for _, pair := range *(schema.Definitions) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Title != nil { + result += indent + "title: " + *(schema.Title) + "\n" + } + if schema.Description != nil { + result += indent + "description: " + *(schema.Description) + "\n" + } + if schema.Default != nil { + result += indent + "default:\n" + result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) + } + if schema.Format != nil { + result += indent + "format: " + *(schema.Format) + "\n" + } + if schema.Ref != nil { + result += indent + "$ref: " + *(schema.Ref) + "\n" + } + return result +} diff --git a/vendor/github.com/google/gnostic-models/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go new file mode 100644 index 00000000..4781bdc5 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/models.go @@ -0,0 +1,228 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonschema supports the reading, writing, and manipulation +// of JSON Schemas. +package jsonschema + +import "gopkg.in/yaml.v3" + +// The Schema struct models a JSON Schema and, because schemas are +// defined hierarchically, contains many references to itself. +// All fields are pointers and are nil if the associated values +// are not specified. +type Schema struct { + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + + // http://json-schema.org/latest/json-schema-validation.html + // 5.1. Validation keywords for numeric instances (number and integer) + MultipleOf *SchemaNumber + Maximum *SchemaNumber + ExclusiveMaximum *bool + Minimum *SchemaNumber + ExclusiveMinimum *bool + + // 5.2. Validation keywords for strings + MaxLength *int64 + MinLength *int64 + Pattern *string + + // 5.3. Validation keywords for arrays + AdditionalItems *SchemaOrBoolean + Items *SchemaOrSchemaArray + MaxItems *int64 + MinItems *int64 + UniqueItems *bool + + // 5.4. Validation keywords for objects + MaxProperties *int64 + MinProperties *int64 + Required *[]string + AdditionalProperties *SchemaOrBoolean + Properties *[]*NamedSchema + PatternProperties *[]*NamedSchema + Dependencies *[]*NamedSchemaOrStringArray + + // 5.5. Validation keywords for any instance type + Enumeration *[]SchemaEnumValue + Type *StringOrStringArray + AllOf *[]*Schema + AnyOf *[]*Schema + OneOf *[]*Schema + Not *Schema + Definitions *[]*NamedSchema + + // 6. Metadata keywords + Title *string + Description *string + Default *yaml.Node + + // 7. Semantic validation with "format" + Format *string +} + +// These helper structs represent "combination" types that generally can +// have values of one type or another. All are used to represent parts +// of Schemas. + +// SchemaNumber represents a value that can be either an Integer or a Float. +type SchemaNumber struct { + Integer *int64 + Float *float64 +} + +// NewSchemaNumberWithInteger creates and returns a new object +func NewSchemaNumberWithInteger(i int64) *SchemaNumber { + result := &SchemaNumber{} + result.Integer = &i + return result +} + +// NewSchemaNumberWithFloat creates and returns a new object +func NewSchemaNumberWithFloat(f float64) *SchemaNumber { + result := &SchemaNumber{} + result.Float = &f + return result +} + +// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. +type SchemaOrBoolean struct { + Schema *Schema + Boolean *bool +} + +// NewSchemaOrBooleanWithSchema creates and returns a new object +func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Schema = s + return result +} + +// NewSchemaOrBooleanWithBoolean creates and returns a new object +func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Boolean = &b + return result +} + +// StringOrStringArray represents a value that can be either +// a String or an Array of Strings. 
+type StringOrStringArray struct { + String *string + StringArray *[]string +} + +// NewStringOrStringArrayWithString creates and returns a new object +func NewStringOrStringArrayWithString(s string) *StringOrStringArray { + result := &StringOrStringArray{} + result.String = &s + return result +} + +// NewStringOrStringArrayWithStringArray creates and returns a new object +func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { + result := &StringOrStringArray{} + result.StringArray = &a + return result +} + +// SchemaOrStringArray represents a value that can be either +// a Schema or an Array of Strings. +type SchemaOrStringArray struct { + Schema *Schema + StringArray *[]string +} + +// SchemaOrSchemaArray represents a value that can be either +// a Schema or an Array of Schemas. +type SchemaOrSchemaArray struct { + Schema *Schema + SchemaArray *[]*Schema +} + +// NewSchemaOrSchemaArrayWithSchema creates and returns a new object +func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.Schema = s + return result +} + +// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object +func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.SchemaArray = &a + return result +} + +// SchemaEnumValue represents a value that can be part of an +// enumeration in a Schema. +type SchemaEnumValue struct { + String *string + Bool *bool +} + +// NamedSchema is a name-value pair that is used to emulate maps +// with ordered keys. +type NamedSchema struct { + Name string + Value *Schema +} + +// NewNamedSchema creates and returns a new object +func NewNamedSchema(name string, value *Schema) *NamedSchema { + return &NamedSchema{Name: name, Value: value} +} + +// NamedSchemaOrStringArray is a name-value pair that is used +// to emulate maps with ordered keys. +type NamedSchemaOrStringArray struct { + Name string + Value *SchemaOrStringArray +} + +// Access named subschemas by name + +func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { + if array == nil { + return nil + } + for _, pair := range *array { + if pair.Name == name { + return pair.Value + } + } + return nil +} + +// PropertyWithName returns the selected element. +func (s *Schema) PropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Properties, name) +} + +// PatternPropertyWithName returns the selected element. +func (s *Schema) PatternPropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.PatternProperties, name) +} + +// DefinitionWithName returns the selected element. +func (s *Schema) DefinitionWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Definitions, name) +} + +// AddProperty adds a named property. +func (s *Schema) AddProperty(name string, property *Schema) { + *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) +} diff --git a/vendor/github.com/google/gnostic-models/jsonschema/operations.go b/vendor/github.com/google/gnostic-models/jsonschema/operations.go new file mode 100644 index 00000000..ba8dd4a9 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/operations.go @@ -0,0 +1,394 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + "log" + "strings" +) + +// +// OPERATIONS +// The following methods perform operations on Schemas. +// + +// IsEmpty returns true if no members of the Schema are specified. +func (schema *Schema) IsEmpty() bool { + return (schema.Schema == nil) && + (schema.ID == nil) && + (schema.MultipleOf == nil) && + (schema.Maximum == nil) && + (schema.ExclusiveMaximum == nil) && + (schema.Minimum == nil) && + (schema.ExclusiveMinimum == nil) && + (schema.MaxLength == nil) && + (schema.MinLength == nil) && + (schema.Pattern == nil) && + (schema.AdditionalItems == nil) && + (schema.Items == nil) && + (schema.MaxItems == nil) && + (schema.MinItems == nil) && + (schema.UniqueItems == nil) && + (schema.MaxProperties == nil) && + (schema.MinProperties == nil) && + (schema.Required == nil) && + (schema.AdditionalProperties == nil) && + (schema.Properties == nil) && + (schema.PatternProperties == nil) && + (schema.Dependencies == nil) && + (schema.Enumeration == nil) && + (schema.Type == nil) && + (schema.AllOf == nil) && + (schema.AnyOf == nil) && + (schema.OneOf == nil) && + (schema.Not == nil) && + (schema.Definitions == nil) && + (schema.Title == nil) && + (schema.Description == nil) && + (schema.Default == nil) && + (schema.Format == nil) && + (schema.Ref == nil) +} + +// IsEqual returns true if two schemas are equal. +func (schema *Schema) IsEqual(schema2 *Schema) bool { + return schema.String() == schema2.String() +} + +// SchemaOperation represents a function that can be applied to a Schema. +type SchemaOperation func(schema *Schema, context string) + +// Applies a specified function to a Schema and all of the Schemas that it contains. 
+func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { + + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + s.applyToSchemas(operation, "AdditionalItems") + } + } + + if schema.Items != nil { + if schema.Items.SchemaArray != nil { + for _, s := range *(schema.Items.SchemaArray) { + s.applyToSchemas(operation, "Items.SchemaArray") + } + } else if schema.Items.Schema != nil { + schema.Items.Schema.applyToSchemas(operation, "Items.Schema") + } + } + + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + s.applyToSchemas(operation, "AdditionalProperties") + } + } + + if schema.Properties != nil { + for _, pair := range *(schema.Properties) { + s := pair.Value + s.applyToSchemas(operation, "Properties") + } + } + if schema.PatternProperties != nil { + for _, pair := range *(schema.PatternProperties) { + s := pair.Value + s.applyToSchemas(operation, "PatternProperties") + } + } + + if schema.Dependencies != nil { + for _, pair := range *(schema.Dependencies) { + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + s.applyToSchemas(operation, "Dependencies") + } + } + } + + if schema.AllOf != nil { + for _, s := range *(schema.AllOf) { + s.applyToSchemas(operation, "AllOf") + } + } + if schema.AnyOf != nil { + for _, s := range *(schema.AnyOf) { + s.applyToSchemas(operation, "AnyOf") + } + } + if schema.OneOf != nil { + for _, s := range *(schema.OneOf) { + s.applyToSchemas(operation, "OneOf") + } + } + if schema.Not != nil { + schema.Not.applyToSchemas(operation, "Not") + } + + if schema.Definitions != nil { + for _, pair := range *(schema.Definitions) { + s := pair.Value + s.applyToSchemas(operation, "Definitions") + } + } + + operation(schema, context) +} + +// CopyProperties copies all non-nil properties from the source Schema to the schema Schema. 
+func (schema *Schema) CopyProperties(source *Schema) { + if source.Schema != nil { + schema.Schema = source.Schema + } + if source.ID != nil { + schema.ID = source.ID + } + if source.MultipleOf != nil { + schema.MultipleOf = source.MultipleOf + } + if source.Maximum != nil { + schema.Maximum = source.Maximum + } + if source.ExclusiveMaximum != nil { + schema.ExclusiveMaximum = source.ExclusiveMaximum + } + if source.Minimum != nil { + schema.Minimum = source.Minimum + } + if source.ExclusiveMinimum != nil { + schema.ExclusiveMinimum = source.ExclusiveMinimum + } + if source.MaxLength != nil { + schema.MaxLength = source.MaxLength + } + if source.MinLength != nil { + schema.MinLength = source.MinLength + } + if source.Pattern != nil { + schema.Pattern = source.Pattern + } + if source.AdditionalItems != nil { + schema.AdditionalItems = source.AdditionalItems + } + if source.Items != nil { + schema.Items = source.Items + } + if source.MaxItems != nil { + schema.MaxItems = source.MaxItems + } + if source.MinItems != nil { + schema.MinItems = source.MinItems + } + if source.UniqueItems != nil { + schema.UniqueItems = source.UniqueItems + } + if source.MaxProperties != nil { + schema.MaxProperties = source.MaxProperties + } + if source.MinProperties != nil { + schema.MinProperties = source.MinProperties + } + if source.Required != nil { + schema.Required = source.Required + } + if source.AdditionalProperties != nil { + schema.AdditionalProperties = source.AdditionalProperties + } + if source.Properties != nil { + schema.Properties = source.Properties + } + if source.PatternProperties != nil { + schema.PatternProperties = source.PatternProperties + } + if source.Dependencies != nil { + schema.Dependencies = source.Dependencies + } + if source.Enumeration != nil { + schema.Enumeration = source.Enumeration + } + if source.Type != nil { + schema.Type = source.Type + } + if source.AllOf != nil { + schema.AllOf = source.AllOf + } + if source.AnyOf != nil { + schema.AnyOf = source.AnyOf + } + if source.OneOf != nil { + schema.OneOf = source.OneOf + } + if source.Not != nil { + schema.Not = source.Not + } + if source.Definitions != nil { + schema.Definitions = source.Definitions + } + if source.Title != nil { + schema.Title = source.Title + } + if source.Description != nil { + schema.Description = source.Description + } + if source.Default != nil { + schema.Default = source.Default + } + if source.Format != nil { + schema.Format = source.Format + } + if source.Ref != nil { + schema.Ref = source.Ref + } +} + +// TypeIs returns true if the Type of a Schema includes the specified type +func (schema *Schema) TypeIs(typeName string) bool { + if schema.Type != nil { + // the schema Type is either a string or an array of strings + if schema.Type.String != nil { + return (*(schema.Type.String) == typeName) + } else if schema.Type.StringArray != nil { + for _, n := range *(schema.Type.StringArray) { + if n == typeName { + return true + } + } + } + } + return false +} + +// ResolveRefs resolves "$ref" elements in a Schema and its children. +// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, +// the reference is kept and we expect downstream tools to separately model these +// referenced schemas. 
+func (schema *Schema) ResolveRefs() { + rootSchema := schema + count := 1 + for count > 0 { + count = 0 + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.Ref != nil { + resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) + if err != nil { + log.Printf("%+v", err) + } else if resolvedRef.TypeIs("object") { + // don't substitute for objects, we'll model the referenced schema with a class + } else if context == "OneOf" { + // don't substitute for references inside oneOf declarations + } else if resolvedRef.OneOf != nil { + // don't substitute for references that contain oneOf declarations + } else if resolvedRef.AdditionalProperties != nil { + // don't substitute for references that look like objects + } else { + schema.Ref = nil + schema.CopyProperties(resolvedRef) + count++ + } + } + }, "") + } +} + +// resolveJSONPointer resolves JSON pointers. +// This current implementation is very crude and custom for OpenAPI 2.0 schemas. +// It panics for any pointer that it is unable to resolve. +func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { + parts := strings.Split(ref, "#") + if len(parts) == 2 { + documentName := parts[0] + "#" + if documentName == "#" && schema.ID != nil { + documentName = *(schema.ID) + } + path := parts[1] + document := schemas[documentName] + pathParts := strings.Split(path, "/") + + // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases + if len(pathParts) == 1 { + return document, nil + } else if len(pathParts) == 3 { + switch pathParts[1] { + case "definitions": + dictionary := document.Definitions + for _, pair := range *dictionary { + if pair.Name == pathParts[2] { + result = pair.Value + } + } + case "properties": + dictionary := document.Properties + for _, pair := range *dictionary { + if pair.Name == pathParts[2] { + result = pair.Value + } + } + default: + break + } + } + } + if result == nil { + return nil, fmt.Errorf("unresolved pointer: %+v", ref) + } + return result, nil +} + +// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. +func (schema *Schema) ResolveAllOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AllOf != nil { + for _, allOf := range *(schema.AllOf) { + schema.CopyProperties(allOf) + } + schema.AllOf = nil + } + }, "resolveAllOfs") +} + +// ResolveAnyOfs replaces all "anyOf" elements with "oneOf". 
+func (schema *Schema) ResolveAnyOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AnyOf != nil { + schema.OneOf = schema.AnyOf + schema.AnyOf = nil + } + }, "resolveAnyOfs") +} + +// return a pointer to a copy of a passed-in string +func stringptr(input string) (output *string) { + return &input +} + +// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperty(name string) { + *schema.Properties = append(*schema.Properties, + NewNamedSchema(name, + &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) +} + +// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperties(names []string) { + for _, name := range names { + schema.CopyOfficialSchemaProperty(name) + } +} diff --git a/vendor/github.com/google/gnostic-models/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go new file mode 100644 index 00000000..b8583d46 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/reader.go @@ -0,0 +1,442 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate go run generate-base.go + +package jsonschema + +import ( + "fmt" + "io/ioutil" + "strconv" + + "gopkg.in/yaml.v3" +) + +// This is a global map of all known Schemas. +// It is initialized when the first Schema is created and inserted. +var schemas map[string]*Schema + +// NewBaseSchema builds a schema object from an embedded json representation. +func NewBaseSchema() (schema *Schema, err error) { + b, err := baseSchemaBytes() + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(b, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromFile reads a schema from a file. +// Currently this assumes that schemas are stored in the source distribution of this project. +func NewSchemaFromFile(filename string) (schema *Schema, err error) { + file, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(file, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromObject constructs a schema from a parsed JSON object. +// Due to the complexity of the schema representation, this is a +// custom reader and not the standard Go JSON reader (encoding/json). 
+func NewSchemaFromObject(jsonData *yaml.Node) *Schema { + switch jsonData.Kind { + case yaml.DocumentNode: + return NewSchemaFromObject(jsonData.Content[0]) + case yaml.MappingNode: + schema := &Schema{} + + for i := 0; i < len(jsonData.Content); i += 2 { + k := jsonData.Content[i].Value + v := jsonData.Content[i+1] + + switch k { + case "$schema": + schema.Schema = schema.stringValue(v) + case "id": + schema.ID = schema.stringValue(v) + + case "multipleOf": + schema.MultipleOf = schema.numberValue(v) + case "maximum": + schema.Maximum = schema.numberValue(v) + case "exclusiveMaximum": + schema.ExclusiveMaximum = schema.boolValue(v) + case "minimum": + schema.Minimum = schema.numberValue(v) + case "exclusiveMinimum": + schema.ExclusiveMinimum = schema.boolValue(v) + + case "maxLength": + schema.MaxLength = schema.intValue(v) + case "minLength": + schema.MinLength = schema.intValue(v) + case "pattern": + schema.Pattern = schema.stringValue(v) + + case "additionalItems": + schema.AdditionalItems = schema.schemaOrBooleanValue(v) + case "items": + schema.Items = schema.schemaOrSchemaArrayValue(v) + case "maxItems": + schema.MaxItems = schema.intValue(v) + case "minItems": + schema.MinItems = schema.intValue(v) + case "uniqueItems": + schema.UniqueItems = schema.boolValue(v) + + case "maxProperties": + schema.MaxProperties = schema.intValue(v) + case "minProperties": + schema.MinProperties = schema.intValue(v) + case "required": + schema.Required = schema.arrayOfStringsValue(v) + case "additionalProperties": + schema.AdditionalProperties = schema.schemaOrBooleanValue(v) + case "properties": + schema.Properties = schema.mapOfSchemasValue(v) + case "patternProperties": + schema.PatternProperties = schema.mapOfSchemasValue(v) + case "dependencies": + schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) + + case "enum": + schema.Enumeration = schema.arrayOfEnumValuesValue(v) + + case "type": + schema.Type = schema.stringOrStringArrayValue(v) + case "allOf": + schema.AllOf = schema.arrayOfSchemasValue(v) + case "anyOf": + schema.AnyOf = schema.arrayOfSchemasValue(v) + case "oneOf": + schema.OneOf = schema.arrayOfSchemasValue(v) + case "not": + schema.Not = NewSchemaFromObject(v) + case "definitions": + schema.Definitions = schema.mapOfSchemasValue(v) + + case "title": + schema.Title = schema.stringValue(v) + case "description": + schema.Description = schema.stringValue(v) + + case "default": + schema.Default = v + + case "format": + schema.Format = schema.stringValue(v) + case "$ref": + schema.Ref = schema.stringValue(v) + default: + fmt.Printf("UNSUPPORTED (%s)\n", k) + } + } + + // insert schema in global map + if schema.ID != nil { + if schemas == nil { + schemas = make(map[string]*Schema, 0) + } + schemas[*(schema.ID)] = schema + } + return schema + + default: + fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) + return nil + } + + return nil +} + +// +// BUILDERS +// The following methods build elements of Schemas from interface{} values. +// Each returns nil if it is unable to build the desired element. +// + +// Gets the string value of an interface{} value if possible. +func (schema *Schema) stringValue(v *yaml.Node) *string { + switch v.Kind { + case yaml.ScalarNode: + return &v.Value + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the numeric value of an interface{} value if possible. 
+func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { + number := &SchemaNumber{} + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!float": + v2, _ := strconv.ParseFloat(v.Value, 64) + number.Float = &v2 + return number + case "!!int": + v2, _ := strconv.ParseInt(v.Value, 10, 64) + number.Integer = &v2 + return number + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the integer value of an interface{} value if possible. +func (schema *Schema) intValue(v *yaml.Node) *int64 { + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!float": + v2, _ := strconv.ParseFloat(v.Value, 64) + v3 := int64(v2) + return &v3 + case "!!int": + v2, _ := strconv.ParseInt(v.Value, 10, 64) + return &v2 + default: + fmt.Printf("intValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("intValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the bool value of an interface{} value if possible. +func (schema *Schema) boolValue(v *yaml.Node) *bool { + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!bool": + v2, _ := strconv.ParseBool(v.Value) + return &v2 + default: + fmt.Printf("boolValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("boolValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a map of Schemas from an interface{} value if possible. +func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { + switch v.Kind { + case yaml.MappingNode: + m := make([]*NamedSchema, 0) + for i := 0; i < len(v.Content); i += 2 { + k2 := v.Content[i].Value + v2 := v.Content[i+1] + pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} + m = append(m, pair) + } + return &m + default: + fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of Schemas from an interface{} value if possible. +func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { + switch v.Kind { + case yaml.SequenceNode: + m := make([]*Schema, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.MappingNode: + s := NewSchemaFromObject(v2) + m = append(m, s) + default: + fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) + } + } + return &m + case yaml.MappingNode: + m := make([]*Schema, 0) + s := NewSchemaFromObject(v) + m = append(m, s) + return &m + default: + fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a Schema or an array of Schemas from an interface{} value if possible. +func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { + switch v.Kind { + case yaml.SequenceNode: + m := make([]*Schema, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.MappingNode: + s := NewSchemaFromObject(v2) + m = append(m, s) + default: + fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) + } + } + return &SchemaOrSchemaArray{SchemaArray: &m} + case yaml.MappingNode: + s := NewSchemaFromObject(v) + return &SchemaOrSchemaArray{Schema: s} + default: + fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of strings from an interface{} value if possible. 
+func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { + switch v.Kind { + case yaml.ScalarNode: + a := []string{v.Value} + return &a + case yaml.SequenceNode: + a := make([]string, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + a = append(a, v2.Value) + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) + } + } + return &a + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a string or an array of strings from an interface{} value if possible. +func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { + switch v.Kind { + case yaml.ScalarNode: + s := &StringOrStringArray{} + s.String = &v.Value + return s + case yaml.SequenceNode: + a := make([]string, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + a = append(a, v2.Value) + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) + } + } + s := &StringOrStringArray{} + s.StringArray = &a + return s + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of enum values from an interface{} value if possible. +func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { + a := make([]SchemaEnumValue, 0) + switch v.Kind { + case yaml.SequenceNode: + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + switch v2.Tag { + case "!!str": + a = append(a, SchemaEnumValue{String: &v2.Value}) + case "!!bool": + v3, _ := strconv.ParseBool(v2.Value) + a = append(a, SchemaEnumValue{Bool: &v3}) + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) + } + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) + } + } + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) + } + return &a +} + +// Gets a map of schemas or string arrays from an interface{} value if possible. +func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { + m := make([]*NamedSchemaOrStringArray, 0) + switch v.Kind { + case yaml.MappingNode: + for i := 0; i < len(v.Content); i += 2 { + k2 := v.Content[i].Value + v2 := v.Content[i+1] + switch v2.Kind { + case yaml.SequenceNode: + a := make([]string, 0) + for _, v3 := range v2.Content { + switch v3.Kind { + case yaml.ScalarNode: + a = append(a, v3.Value) + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) + } + } + s := &SchemaOrStringArray{} + s.StringArray = &a + pair := &NamedSchemaOrStringArray{Name: k2, Value: s} + m = append(m, pair) + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) + } + } + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) + } + return &m +} + +// Gets a schema or a boolean value from an interface{} value if possible. 
+func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { + schemaOrBoolean := &SchemaOrBoolean{} + switch v.Kind { + case yaml.ScalarNode: + v2, _ := strconv.ParseBool(v.Value) + schemaOrBoolean.Boolean = &v2 + case yaml.MappingNode: + schemaOrBoolean.Schema = NewSchemaFromObject(v) + default: + fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) + } + return schemaOrBoolean +} diff --git a/vendor/github.com/google/gnostic-models/jsonschema/schema.json b/vendor/github.com/google/gnostic-models/jsonschema/schema.json new file mode 100644 index 00000000..85eb502a --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/schema.json @@ -0,0 +1,150 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uri" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": 
{ "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/google/gnostic-models/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go new file mode 100644 index 00000000..340dc5f9 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/jsonschema/writer.go @@ -0,0 +1,369 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +const indentation = " " + +func renderMappingNode(node *yaml.Node, indent string) (result string) { + result = "{\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i += 2 { + // first print the key + key := node.Content[i].Value + result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) + // then the value + value := node.Content[i+1] + switch value.Kind { + case yaml.ScalarNode: + result += "\"" + value.Value + "\"" + case yaml.MappingNode: + result += renderMappingNode(value, innerIndent) + case yaml.SequenceNode: + result += renderSequenceNode(value, innerIndent) + default: + result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) + } + if i < len(node.Content)-2 { + result += "," + } + result += "\n" + } + + result += indent + "}" + return result +} + +func renderSequenceNode(node *yaml.Node, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i++ { + item := node.Content[i] + switch item.Kind { + case yaml.ScalarNode: + result += innerIndent + "\"" + item.Value + "\"" + case yaml.MappingNode: + result += innerIndent + renderMappingNode(item, innerIndent) + "" + default: + result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) + } + if i < len(node.Content)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +func renderStringArray(array []string, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i, item := range array { + result += innerIndent + "\"" + item + "\"" + if i < len(array)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +// Render renders a yaml.Node as JSON +func Render(node *yaml.Node) string { + if node.Kind == yaml.DocumentNode { + if len(node.Content) == 1 { + return Render(node.Content[0]) + } + } else if node.Kind == yaml.MappingNode { + return renderMappingNode(node, "") + "\n" + } else if node.Kind == yaml.SequenceNode { + return renderSequenceNode(node, "") + "\n" + } + return "" +} + +func (object *SchemaNumber) nodeValue() *yaml.Node { + if object.Integer != nil { + return nodeForInt64(*object.Integer) + } else if object.Float != nil { + return nodeForFloat64(*object.Float) + } else 
{ + return nil + } +} + +func (object *SchemaOrBoolean) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.Boolean != nil { + return nodeForBoolean(*object.Boolean) + } else { + return nil + } +} + +func nodeForStringArray(array []string) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, nodeForString(item)) + } + return nodeForSequence(content) +} + +func nodeForSchemaArray(array []*Schema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func (object *StringOrStringArray) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrStringArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.SchemaArray != nil { + return nodeForSchemaArray(*(object.SchemaArray)) + } else { + return nil + } +} + +func (object *SchemaEnumValue) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.Bool != nil { + return nodeForBoolean(*object.Bool) + } else { + return nil + } +} + +func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range *array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func nodeForMapping(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: content, + } +} + +func nodeForSequence(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.SequenceNode, + Content: content, + } +} + +func nodeForString(value string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: value, + } +} + +func nodeForBoolean(value bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", value), + } +} + +func nodeForInt64(value int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", value), + } +} + +func nodeForFloat64(value float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%f", value), + } +} + +func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { + nodes = append(nodes, nodeForString(name)) + nodes = append(nodes, value) + return nodes +} + +func (schema *Schema) nodeValue() *yaml.Node { + n := &yaml.Node{Kind: yaml.MappingNode} + content := make([]*yaml.Node, 0) + if 
schema.Title != nil { + content = appendPair(content, "title", nodeForString(*schema.Title)) + } + if schema.ID != nil { + content = appendPair(content, "id", nodeForString(*schema.ID)) + } + if schema.Schema != nil { + content = appendPair(content, "$schema", nodeForString(*schema.Schema)) + } + if schema.Type != nil { + content = appendPair(content, "type", schema.Type.nodeValue()) + } + if schema.Items != nil { + content = appendPair(content, "items", schema.Items.nodeValue()) + } + if schema.Description != nil { + content = appendPair(content, "description", nodeForString(*schema.Description)) + } + if schema.Required != nil { + content = appendPair(content, "required", nodeForStringArray(*schema.Required)) + } + if schema.AdditionalProperties != nil { + content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) + } + if schema.PatternProperties != nil { + content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) + } + if schema.Properties != nil { + content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) + } + if schema.Dependencies != nil { + content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) + } + if schema.Ref != nil { + content = appendPair(content, "$ref", nodeForString(*schema.Ref)) + } + if schema.MultipleOf != nil { + content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) + } + if schema.Maximum != nil { + content = appendPair(content, "maximum", schema.Maximum.nodeValue()) + } + if schema.ExclusiveMaximum != nil { + content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + content = appendPair(content, "minimum", schema.Minimum.nodeValue()) + } + if schema.ExclusiveMinimum != nil { + content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) + } + if schema.MinLength != nil { + content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) + } + if schema.Pattern != nil { + content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) + } + if schema.AdditionalItems != nil { + content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) + } + if schema.MaxItems != nil { + content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) + } + if schema.MinItems != nil { + content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) + } + if schema.UniqueItems != nil { + content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) + } + if schema.MaxProperties != nil { + content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) + } + if schema.MinProperties != nil { + content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) + } + if schema.Enumeration != nil { + content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) + } + if schema.AllOf != nil { + content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) + } + if schema.AnyOf != nil { + content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) + } + if schema.OneOf != nil { + content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) + } + if schema.Not != nil { + content = appendPair(content, "not", schema.Not.nodeValue()) 
+ } + if schema.Definitions != nil { + content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) + } + if schema.Default != nil { + // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) + } + if schema.Format != nil { + content = appendPair(content, "format", nodeForString(*schema.Format)) + } + n.Content = content + return n +} + +// JSONString returns a json representation of a schema. +func (schema *Schema) JSONString() string { + node := schema.nodeValue() + return Render(node) +} diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go new file mode 100644 index 00000000..d71fe6d5 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go @@ -0,0 +1,8820 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "regexp" + "strings" + + "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/compiler" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := compiler.BoolForScalarNode(in) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + matched = true + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes := compiler.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
+func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, 
err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
+func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = 
append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := compiler.SequenceNodeForNode(v5) + if ok { + x.Schemes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := compiler.SequenceNodeForNode(v6) + if ok { + x.Consumes = compiler.StringArrayForSequenceNode(v) + } else { + message := 
fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := compiler.SequenceNodeForNode(v7) + if ok { + x.Produces = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", v11, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", v13, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := compiler.SequenceNodeForNode(v14) + if ok { + for _, item := range a.Content { + y, err := NewTag(item, compiler.NewContext("tags", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", v15, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + 
result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := 
m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := compiler.SequenceNodeForNode(v5) + if ok { + x.Required = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has 
unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. 
+func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := 
compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := compiler.IntForScalarNode(v16) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, 
"pattern") + if v17 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v17) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := compiler.IntForScalarNode(v19) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v21) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. 
+func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for 
maximum: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := compiler.IntForScalarNode(v10) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := compiler.IntForScalarNode(v13) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v16) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := 
compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = compiler.StringForScalarNode(v18) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. +func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != 
nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", 
compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v16) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := 
compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. +func NewHeaders(in *yaml.Node, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, compiler.NewContext("license", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", m, context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. 
+func NewJsonReference(in *yaml.Node, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. +func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = 
string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. +func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in *yaml.Node, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in *yaml.Node, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in *yaml.Node, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in *yaml.Node, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in *yaml.Node, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
+func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", m, context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", m, context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", m, context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", m, context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid NonBodyParameter") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
+func NewOauth2AccessCodeSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := 
compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. 
+func NewOauth2PasswordSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if 
handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedString{} + pair.Name = k + pair.Value, _ = compiler.StringForScalarNode(v) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. +func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := compiler.SequenceNodeForNode(v1) + if ok { + x.Tags = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, 
compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := compiler.SequenceNodeForNode(v6) + if ok { + x.Produces = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := compiler.SequenceNodeForNode(v7) + if ok { + x.Consumes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := compiler.SequenceNodeForNode(v8) + if ok { + for _, item := range a.Content { + y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := compiler.SequenceNodeForNode(v10) + if ok { + x.Schemes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) + if err != nil { + errors = append(errors, err) + } 
+ x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. +func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", m, context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", m, context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid Parameter") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
+func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ParametersItem") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, 
compiler.NewContext("options", v6, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if v7 != nil { + var err error + x.Head, err = NewOperation(v7, compiler.NewContext("head", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", v8, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := compiler.SequenceNodeForNode(v9) + if ok { + for _, item := range a.Content { + y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. 
+func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = 
compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v16) + if !ok { + 
message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. 
+func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
+func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has 
unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := compiler.IntForScalarNode(v10) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := compiler.IntForScalarNode(v13) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v16) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
+func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &QueryParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = compiler.BoolForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = compiler.StringForScalarNode(v2)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [query]
+			if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = compiler.StringForScalarNode(v3)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = compiler.StringForScalarNode(v4)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool allow_empty_value = 5;
+		v5 := compiler.MapValueForKey(m, "allowEmptyValue")
+		if v5 != nil {
+			x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 6;
+		v6 := compiler.MapValueForKey(m, "type")
+		if v6 != nil {
+			x.Type, ok = compiler.StringForScalarNode(v6)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 7;
+		v7 := compiler.MapValueForKey(m, "format")
+		if v7 != nil {
+			x.Format, ok = compiler.StringForScalarNode(v7)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 8;
+		v8 := compiler.MapValueForKey(m, "items")
+		if v8 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 9;
+		v9 := compiler.MapValueForKey(m, "collectionFormat")
+		if v9 != nil {
+			x.CollectionFormat, ok = compiler.StringForScalarNode(v9)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes multi]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 10;
+		v10 := compiler.MapValueForKey(m, "default")
+		if v10 != nil {
+			var err error
+			x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 11;
+		v11 := compiler.MapValueForKey(m, "maximum")
+		if v11 != nil {
+			v, ok := compiler.FloatForScalarNode(v11)
+			if ok {
+				x.Maximum = v
+			} else {
+				message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 12;
+		v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v12 != nil {
+			x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 13;
+		v13 := compiler.MapValueForKey(m, "minimum")
+		if v13 != nil {
+			v, ok := compiler.FloatForScalarNode(v13)
+			if ok {
+				x.Minimum = v
+			} else {
+				message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 14;
+		v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v14 != nil {
+			x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 15;
+		v15 := compiler.MapValueForKey(m, "maxLength")
+		if v15 != nil {
+			t, ok := compiler.IntForScalarNode(v15)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 16;
+		v16 := compiler.MapValueForKey(m, "minLength")
+		if v16 != nil {
+			t, ok := compiler.IntForScalarNode(v16)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 17;
+		v17 := compiler.MapValueForKey(m, "pattern")
+		if v17 != nil {
+			x.Pattern, ok = compiler.StringForScalarNode(v17)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 18;
+		v18 := compiler.MapValueForKey(m, "maxItems")
+		if v18 != nil {
+			t, ok := compiler.IntForScalarNode(v18)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 19;
+		v19 := compiler.MapValueForKey(m, "minItems")
+		if v19 != nil {
+			t, ok := compiler.IntForScalarNode(v19)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 20;
+		v20 := compiler.MapValueForKey(m, "uniqueItems")
+		if v20 != nil {
+			x.UniqueItems, ok = compiler.BoolForScalarNode(v20)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 21;
+		v21 := compiler.MapValueForKey(m, "enum")
+		if v21 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := compiler.SequenceNodeForNode(v21)
+			if ok {
+				for _, item := range a.Content {
+					y, err := NewAny(item, compiler.NewContext("enum", item, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 22;
+		v22 := compiler.MapValueForKey(m, "multipleOf")
+		if v22 != nil {
+			v, ok := compiler.FloatForScalarNode(v22)
+			if ok {
+				x.MultipleOf = v
+			} else {
+				message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 23;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponse creates an object of type Response if possible, returning an error if not.
+func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", v2, context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", v3, context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", v4, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
+func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. +func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", m, context)) + if matchingError == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context)) + if matchingError == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid ResponseValue") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
+func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if pattern2.MatchString(k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. 
+func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + v, ok := compiler.FloatForScalarNode(v7) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + 
v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + v, ok := compiler.FloatForScalarNode(v9) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := compiler.IntForScalarNode(v12) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v16) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := compiler.SequenceNodeForNode(v19) + if ok { + x.Required = 
compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", item, context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", v21, context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", v22, context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", v23, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := compiler.SequenceNodeForNode(v24) + if ok { + for _, item := range a.Content { + y, err := NewSchema(item, compiler.NewContext("allOf", item, context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", v25, context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = compiler.StringForScalarNode(v26) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = compiler.BoolForScalarNode(v27) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", v28, context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", v29, context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", v30, context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension 
= make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", m, context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } else { + message := fmt.Sprintf("contains an invalid SchemaItem") + err := compiler.NewError(context, message) + errors = []error{err} + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
+func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) {
+	errors := make([]error, 0)
+	x := &SecurityDefinitionsItem{}
+	matched := false
+	// BasicAuthenticationSecurity basic_authentication_security = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// ApiKeySecurity api_key_security = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ImplicitSecurity oauth2_implicit_security = 3;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2PasswordSecurity oauth2_password_security = 4;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ApplicationSecurity oauth2_application_security = 5;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", m, context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	} else {
+		message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem")
+		err := compiler.NewError(context, message)
+		errors = []error{err}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not.
+func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) {
+	errors := make([]error, 0)
+	x := &SecurityRequirement{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedStringArray additional_properties = 1;
+		// MAP: StringArray
+		x.AdditionalProperties = make([]*NamedStringArray, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedStringArray{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewStringArray creates an object of type StringArray if possible, returning an error if not.
+func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) {
+	errors := make([]error, 0)
+	x := &StringArray{}
+	x.Value = make([]string, 0)
+	for _, node := range in.Content {
+		s, _ := compiler.StringForScalarNode(node)
+		x.Value = append(x.Value, s)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTag creates an object of type Tag if possible, returning an error if not.
+func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) {
+	errors := make([]error, 0)
+	x := &Tag{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"name"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "externalDocs", "name"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = compiler.StringForScalarNode(v1)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = compiler.StringForScalarNode(v2)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2))
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 3;
+		v3 := compiler.MapValueForKey(m, "externalDocs")
+		if v3 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 4;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes := compiler.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
+func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) {
+	errors := make([]error, 0)
+	x := &TypeItem{}
+	v1 := in
+	switch v1.Kind {
+	case yaml.ScalarNode:
+		x.Value = make([]string, 0)
+		x.Value = append(x.Value, v1.Value)
+	case yaml.SequenceNode:
+		x.Value = make([]string, 0)
+		for _, v := range v1.Content {
+			value := v.Value
+			ok := v.Kind == yaml.ScalarNode
+			if ok {
+				x.Value = append(x.Value, value)
+			} else {
+				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	default:
+		message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
+func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) {
+	errors := make([]error, 0)
+	x := &VendorExtension{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for i := 0; i < len(m.Content); i += 2 {
+			k, ok := compiler.StringForScalarNode(m.Content[i])
+			if ok {
+				v := m.Content[i+1]
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.CallExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes := compiler.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, v, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewXml creates an object of type Xml if possible, returning an error if not.
+func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. +func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
+func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects.
+func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2Scopes objects.
+func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Operation objects.
+func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Parameter objects.
+func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*Parameter_BodyParameter)
+		if ok {
+			_, err := p.BodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*Parameter_NonBodyParameter)
+		if ok {
+			_, err := p.NonBodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParameterDefinitions objects.
+func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParametersItem objects.
+func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseValue objects. +func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. +func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = 
append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaItem objects. +func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitions objects. +func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. 
+func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() *yaml.Node { + var err error + var node yaml.Node + err = yaml.Unmarshal([]byte(m.Yaml), &node) + if err == nil { + if node.Kind == yaml.DocumentNode { + return node.Content[0] + } + return &node + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. +func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. 
+func (m *Contact) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.Email != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("email")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. +func (m *Default) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. +func (m *Document) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("info")) + info.Content = append(info.Content, m.Info.ToRawInfo()) + if m.Host != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("host")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host)) + } + if m.BasePath != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath)) + } + if len(m.Schemes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) + } + if len(m.Consumes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) + } + if len(m.Produces) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("paths")) + info.Content = append(info.Content, m.Paths.ToRawInfo()) + if m.Definitions != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions")) + info.Content = append(info.Content, m.Definitions.ToRawInfo()) + } + if m.Parameters != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) + } + if m.Responses != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if m.SecurityDefinitions != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions")) + info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo()) + } + if len(m.Tags) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Tags { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, items) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. +func (m *Examples) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. 
+func (m *FileSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Title != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if len(m.Required) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.ReadOnly != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
+func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = 
append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. +func (m *Header) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. +func (m *Headers) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. +func (m *Info) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("version")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.TermsOfService != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService)) + } + if m.Contact != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("contact")) + info.Content = append(info.Content, m.Contact.ToRawInfo()) + } + if m.License != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("license")) + info.Content = append(info.Content, m.License.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. 
+func (m *ItemsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Schema) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Schema { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, items) + } + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Value != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, m.Value.ToRawInfo()) + } + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. 
+func (m *NamedParameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. +func (m *NamedSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. 
+func (m *NamedString) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Value != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value)) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. +func (m *NamedStringArray) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. +func (m *NonBodyParameter) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. +func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
+func (m *Operation) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Tags) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.OperationId != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) + } + if len(m.Produces) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) + } + if len(m.Consumes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + if len(m.Schemes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. 
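As a rough usage sketch, again not part of the generated code: a hand-built Operation can be rendered back to Swagger-style YAML through the method above. Field names follow the generated struct; the values are made up.

package main

import (
	"fmt"

	openapi_v2 "github.com/google/gnostic-models/openapiv2"
	"gopkg.in/yaml.v3"
)

func main() {
	op := &openapi_v2.Operation{
		OperationId: "listWidgets",
		Summary:     "List widgets",
		Produces:    []string{"application/json"},
	}

	// Operation.ToRawInfo always emits the required "responses" key; with a nil
	// Responses it serializes as an empty mapping because the nil receiver is
	// checked inside Responses.ToRawInfo.
	out, err := yaml.Marshal(op.ToRawInfo())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}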
+func (m *Parameter) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. +func (m *ParametersItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. +func (m *PathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.XRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + } + if m.Get != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) + info.Content = append(info.Content, m.Get.ToRawInfo()) + } + if m.Put != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) + info.Content = append(info.Content, m.Put.ToRawInfo()) + } + if m.Post != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) + info.Content = append(info.Content, m.Post.ToRawInfo()) + } + if m.Delete != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) + info.Content = append(info.Content, m.Delete.ToRawInfo()) + } + if m.Options != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) + info.Content = append(info.Content, m.Options.ToRawInfo()) + } + if m.Head != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) + info.Content = append(info.Content, m.Head.ToRawInfo()) + } + if m.Patch != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) + info.Content = append(info.Content, m.Patch.ToRawInfo()) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, 
compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. +func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + 
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. +func (m *Paths) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.Path != nil { + for _, item := range m.Path { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
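A similar sketch for the Paths mapping just above: path templates are carried as NamedPathItem name/value pairs rather than a Go map, and the loop over m.Path re-serializes them in order. The Value field name is assumed from the "Mapped value" comments earlier in this file.

package main

import (
	"fmt"

	openapi_v2 "github.com/google/gnostic-models/openapiv2"
	"gopkg.in/yaml.v3"
)

func main() {
	paths := &openapi_v2.Paths{
		Path: []*openapi_v2.NamedPathItem{{
			Name:  "/widgets",
			Value: &openapi_v2.PathItem{Get: &openapi_v2.Operation{OperationId: "listWidgets"}},
		}},
	}

	out, err := yaml.Marshal(paths.ToRawInfo())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // a mapping keyed by "/widgets" with its "get" operation
}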
+func (m *PrimitivesItems) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, 
item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. +func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. +func (m *Response) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. 
+func (m *ResponseDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. +func (m *Schema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.XRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Title != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if m.MaxProperties != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) + } + if m.MinProperties != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) + } + if len(m.Required) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.AdditionalProperties != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) + info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) + } + if m.Type != nil { + if len(m.Type.Value) == 1 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0])) + } else { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value)) + } + } + if m.Items != nil { + items := compiler.NewSequenceNode() + for _, item := range m.Items.Schema { + items.Content = append(items.Content, item.ToRawInfo()) + } + if len(items.Content) == 1 { + items = items.Content[0] + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, items) + } + if len(m.AllOf) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.AllOf { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) + info.Content = 
append(info.Content, items) + } + if m.Properties != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) + info.Content = append(info.Content, m.Properties.ToRawInfo()) + } + if m.Discriminator != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) + } + if m.ReadOnly != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) + } + if m.Xml != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) + info.Content = append(info.Content, m.Xml.ToRawInfo()) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. +func (m *SchemaItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SchemaItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFileSchema() + if v1 != nil { + return v1.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. +func (m *SecurityDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
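One more sketch, this time for Schema.ToRawInfo above: a single-valued TypeItem is flattened to a scalar "type" key, while multiple values would round-trip as a YAML sequence. Assumes the same openapi_v2 import as the earlier sketches.

package main

import (
	"fmt"

	openapi_v2 "github.com/google/gnostic-models/openapiv2"
	"gopkg.in/yaml.v3"
)

func main() {
	s := &openapi_v2.Schema{
		Type:   &openapi_v2.TypeItem{Value: []string{"string"}},
		Format: "date-time",
	}

	out, err := yaml.Marshal(s.ToRawInfo())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // emits "format: date-time" and "type: string"
}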
+func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return compiler.NewNullNode() +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() *yaml.Node { + return compiler.NewSequenceNodeForStringArray(m.Value) +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. 
+func (m *TypeItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Value) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value)) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. +func (m *VendorExtension) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Namespace != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) + } + if m.Prefix != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) + } + if m.Attribute != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) + } + if m.Wrapped != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go new file mode 100644 index 00000000..65c4c913 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go @@ -0,0 +1,7342 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.19.3 +// source: openapiv2/OpenAPIv2.proto + +package openapi_v2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AdditionalPropertiesItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *AdditionalPropertiesItem) Reset() { + *x = AdditionalPropertiesItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdditionalPropertiesItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdditionalPropertiesItem) ProtoMessage() {} + +func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0} +} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (x *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} + +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. +func (*Any) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1} +} + +func (x *Any) GetValue() *anypb.Any { + if x != nil { + return x.Value + } + return nil +} + +func (x *Any) GetYaml() string { + if x != nil { + return x.Yaml + } + return "" +} + +type ApiKeySecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *ApiKeySecurity) Reset() { + *x = ApiKeySecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiKeySecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiKeySecurity) ProtoMessage() {} + +func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead. 
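The generated oneof plumbing follows the usual protoc-gen-go shape: one interface, one wrapper struct per case, and nil-safe Get accessors. A small illustrative sketch (not part of the vendored file) of selecting a case, using the AdditionalPropertiesItem wrapper types declared above:

package main

import (
	"fmt"

	openapi_v2 "github.com/google/gnostic-models/openapiv2"
)

func main() {
	// "additionalProperties: true" in a Swagger schema maps to the Boolean case.
	item := &openapi_v2.AdditionalPropertiesItem{
		Oneof: &openapi_v2.AdditionalPropertiesItem_Boolean{Boolean: true},
	}

	// The Get helpers return the zero value when a different case is set,
	// so callers can probe cases without a type switch.
	if item.GetSchema() == nil {
		fmt.Println("boolean form:", item.GetBoolean()) // boolean form: true
	}
}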
+func (*ApiKeySecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2} +} + +func (x *ApiKeySecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ApiKeySecurity) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ApiKeySecurity) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *ApiKeySecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *BasicAuthenticationSecurity) Reset() { + *x = BasicAuthenticationSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicAuthenticationSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicAuthenticationSecurity) ProtoMessage() {} + +func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead. +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3} +} + +func (x *BasicAuthenticationSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *BasicAuthenticationSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type BodyParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. 
+ Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *BodyParameter) Reset() { + *x = BodyParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BodyParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BodyParameter) ProtoMessage() {} + +func (x *BodyParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead. +func (*BodyParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4} +} + +func (x *BodyParameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BodyParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BodyParameter) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *BodyParameter) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *BodyParameter) GetSchema() *Schema { + if x != nil { + return x.Schema + } + return nil +} + +func (x *BodyParameter) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Contact) Reset() { + *x = Contact{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Contact.ProtoReflect.Descriptor instead. 
+func (*Contact) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5} +} + +func (x *Contact) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *Contact) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Default struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Default) Reset() { + *x = Default{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Default) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Default) ProtoMessage() {} + +func (x *Default) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Default.ProtoReflect.Descriptor instead. +func (*Default) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6} +} + +func (x *Default) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +type Definitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Definitions) Reset() { + *x = Definitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Definitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Definitions) ProtoMessage() {} + +func (x *Definitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Definitions.ProtoReflect.Descriptor instead. +func (*Definitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7} +} + +func (x *Definitions) GetAdditionalProperties() []*NamedSchema { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + // The host (name or ip) of the API. 
Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Document.ProtoReflect.Descriptor instead. 
+func (*Document) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8} +} + +func (x *Document) GetSwagger() string { + if x != nil { + return x.Swagger + } + return "" +} + +func (x *Document) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *Document) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *Document) GetBasePath() string { + if x != nil { + return x.BasePath + } + return "" +} + +func (x *Document) GetSchemes() []string { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Document) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Document) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Document) GetPaths() *Paths { + if x != nil { + return x.Paths + } + return nil +} + +func (x *Document) GetDefinitions() *Definitions { + if x != nil { + return x.Definitions + } + return nil +} + +func (x *Document) GetParameters() *ParameterDefinitions { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Document) GetResponses() *ResponseDefinitions { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Document) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Document) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.SecurityDefinitions + } + return nil +} + +func (x *Document) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Document) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Document) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Examples struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Examples) Reset() { + *x = Examples{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Examples) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Examples) ProtoMessage() {} + +func (x *Examples) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Examples.ProtoReflect.Descriptor instead. 
+func (*Examples) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9} +} + +func (x *Examples) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// information about external documentation +type ExternalDocs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *ExternalDocs) Reset() { + *x = ExternalDocs{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalDocs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocs) ProtoMessage() {} + +func (x *ExternalDocs) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. +func (*ExternalDocs) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10} +} + +func (x *ExternalDocs) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ExternalDocs) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ExternalDocs) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. 
+type FileSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *FileSchema) Reset() { + *x = FileSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileSchema) ProtoMessage() {} + +func (x *FileSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead. +func (*FileSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11} +} + +func (x *FileSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *FileSchema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *FileSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *FileSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *FileSchema) GetRequired() []string { + if x != nil { + return x.Required + } + return nil +} + +func (x *FileSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *FileSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *FileSchema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *FileSchema) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *FileSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *FormDataParameterSubSchema) Reset() { + *x = FormDataParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FormDataParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FormDataParameterSubSchema) ProtoMessage() {} + +func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead. 
+func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12} +} + +func (x *FormDataParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *FormDataParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *FormDataParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *FormDataParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *FormDataParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *FormDataParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *FormDataParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *FormDataParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *FormDataParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *FormDataParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *FormDataParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *FormDataParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *FormDataParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *FormDataParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 
`protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
+func (*Header) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13} +} + +func (x *Header) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Header) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *Header) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *Header) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *Header) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *Header) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *Header) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *Header) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *Header) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *Header) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *Header) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Header) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *Header) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Header) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *Header) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *Header) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Header) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *HeaderParameterSubSchema) Reset() { + *x = HeaderParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameterSubSchema) ProtoMessage() {} + +func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead. 
+func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14} +} + +func (x *HeaderParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *HeaderParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *HeaderParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *HeaderParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *HeaderParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *HeaderParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *HeaderParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *HeaderParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *HeaderParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *HeaderParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *HeaderParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *HeaderParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *HeaderParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *HeaderParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Headers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Headers) Reset() { + *x = Headers{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Headers) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Headers) ProtoMessage() {} + +func (x *Headers) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Headers.ProtoReflect.Descriptor instead. +func (*Headers) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15} +} + +func (x *Headers) GetAdditionalProperties() []*NamedHeader { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The terms of service for the API. + TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. 
+func (*Info) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16} +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.License + } + return nil +} + +func (x *Info) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type ItemsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` +} + +func (x *ItemsItem) Reset() { + *x = ItemsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ItemsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ItemsItem) ProtoMessage() {} + +func (x *ItemsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. +func (*ItemsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17} +} + +func (x *ItemsItem) GetSchema() []*Schema { + if x != nil { + return x.Schema + } + return nil +} + +type JsonReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *JsonReference) Reset() { + *x = JsonReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JsonReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JsonReference) ProtoMessage() {} + +func (x *JsonReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead. 
+func (*JsonReference) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18} +} + +func (x *JsonReference) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *JsonReference) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +type License struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the license. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *License) Reset() { + *x = License{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use License.ProtoReflect.Descriptor instead. +func (*License) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19} +} + +func (x *License) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *License) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedAny) Reset() { + *x = NamedAny{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedAny) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedAny) ProtoMessage() {} + +func (x *NamedAny) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. 
+func (*NamedAny) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20} +} + +func (x *NamedAny) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedAny) GetValue() *Any { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedHeader) Reset() { + *x = NamedHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedHeader) ProtoMessage() {} + +func (x *NamedHeader) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead. +func (*NamedHeader) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21} +} + +func (x *NamedHeader) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedHeader) GetValue() *Header { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedParameter) Reset() { + *x = NamedParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedParameter) ProtoMessage() {} + +func (x *NamedParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead. +func (*NamedParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22} +} + +func (x *NamedParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedParameter) GetValue() *Parameter { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedPathItem) Reset() { + *x = NamedPathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedPathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedPathItem) ProtoMessage() {} + +func (x *NamedPathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. +func (*NamedPathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23} +} + +func (x *NamedPathItem) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedPathItem) GetValue() *PathItem { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedResponse) Reset() { + *x = NamedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedResponse) ProtoMessage() {} + +func (x *NamedResponse) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead. +func (*NamedResponse) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24} +} + +func (x *NamedResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedResponse) GetValue() *Response { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
+type NamedResponseValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedResponseValue) Reset() { + *x = NamedResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedResponseValue) ProtoMessage() {} + +func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead. +func (*NamedResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25} +} + +func (x *NamedResponseValue) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedResponseValue) GetValue() *ResponseValue { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSchema) Reset() { + *x = NamedSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSchema) ProtoMessage() {} + +func (x *NamedSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead. +func (*NamedSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26} +} + +func (x *NamedSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSchema) GetValue() *Schema { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSecurityDefinitionsItem) Reset() { + *x = NamedSecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} + +func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead. +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27} +} + +func (x *NamedSecurityDefinitionsItem) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedString) Reset() { + *x = NamedString{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedString) ProtoMessage() {} + +func (x *NamedString) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. +func (*NamedString) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28} +} + +func (x *NamedString) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedString) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
+type NamedStringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedStringArray) Reset() { + *x = NamedStringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedStringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedStringArray) ProtoMessage() {} + +func (x *NamedStringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. +func (*NamedStringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29} +} + +func (x *NamedStringArray) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedStringArray) GetValue() *StringArray { + if x != nil { + return x.Value + } + return nil +} + +type NonBodyParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (x *NonBodyParameter) Reset() { + *x = NonBodyParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NonBodyParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NonBodyParameter) ProtoMessage() {} + +func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead. 
+func (*NonBodyParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30} +} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +type Oauth2AccessCodeSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2AccessCodeSecurity) Reset() { + *x = Oauth2AccessCodeSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*Oauth2AccessCodeSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} + +func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead. +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31} +} + +func (x *Oauth2AccessCodeSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2ApplicationSecurity) Reset() { + *x = Oauth2ApplicationSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2ApplicationSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2ApplicationSecurity) ProtoMessage() {} + +func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32} +} + +func (x *Oauth2ApplicationSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2ApplicationSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2ImplicitSecurity) Reset() { + *x = Oauth2ImplicitSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2ImplicitSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2ImplicitSecurity) ProtoMessage() {} + +func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33} +} + +func (x *Oauth2ImplicitSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2PasswordSecurity) Reset() { + *x = Oauth2PasswordSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2PasswordSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2PasswordSecurity) ProtoMessage() {} + +func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34} +} + +func (x *Oauth2PasswordSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2PasswordSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Oauth2Scopes) Reset() { + *x = Oauth2Scopes{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2Scopes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2Scopes) ProtoMessage() {} + +func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead. +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35} +} + +func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` + // The transfer protocol of the API. 
+ Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Operation) Reset() { + *x = Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Operation.ProtoReflect.Descriptor instead. +func (*Operation) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36} +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Operation) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Operation) GetResponses() *Responses { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Operation) GetSchemes() []string { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Operation) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (x *Parameter) Reset() { + *x = Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameter) ProtoMessage() {} + +func (x *Parameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. +func (*Parameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37} +} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (x *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` +} + +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} + +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ParameterDefinitions) Reset() { + *x = ParameterDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParameterDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParameterDefinitions) ProtoMessage() {} + +func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead. 
+func (*ParameterDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38} +} + +func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ParametersItem) Reset() { + *x = ParametersItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParametersItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParametersItem) ProtoMessage() {} + +func (x *ParametersItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead. +func (*ParametersItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39} +} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ParametersItem) GetParameter() *Parameter { + if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (x *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +} + +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} + +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +type PathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PathItem) Reset() { + *x = PathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathItem) ProtoMessage() {} + +func (x *PathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. +func (*PathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40} +} + +func (x *PathItem) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *PathItem) GetGet() *Operation { + if x != nil { + return x.Get + } + return nil +} + +func (x *PathItem) GetPut() *Operation { + if x != nil { + return x.Put + } + return nil +} + +func (x *PathItem) GetPost() *Operation { + if x != nil { + return x.Post + } + return nil +} + +func (x *PathItem) GetDelete() *Operation { + if x != nil { + return x.Delete + } + return nil +} + +func (x *PathItem) GetOptions() *Operation { + if x != nil { + return x.Options + } + return nil +} + +func (x *PathItem) GetHead() *Operation { + if x != nil { + return x.Head + } + return nil +} + +func (x *PathItem) GetPatch() *Operation { + if x != nil { + return x.Patch + } + return nil +} + +func (x *PathItem) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *PathItem) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PathParameterSubSchema) Reset() { + *x = PathParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathParameterSubSchema) ProtoMessage() {} + +func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead. 
+func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41} +} + +func (x *PathParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *PathParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *PathParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *PathParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PathParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PathParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *PathParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *PathParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *PathParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *PathParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *PathParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *PathParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *PathParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *PathParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *PathParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *PathParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *PathParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *PathParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *PathParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *PathParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *PathParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
+type Paths struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` +} + +func (x *Paths) Reset() { + *x = Paths{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Paths) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Paths) ProtoMessage() {} + +func (x *Paths) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Paths.ProtoReflect.Descriptor instead. +func (*Paths) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42} +} + +func (x *Paths) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +func (x *Paths) GetPath() []*NamedPathItem { + if x != nil { + return x.Path + } + return nil +} + +type PrimitivesItems struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PrimitivesItems) Reset() { + *x = PrimitivesItems{} + if protoimpl.UnsafeEnabled { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrimitivesItems) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrimitivesItems) ProtoMessage() {} + +func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead. +func (*PrimitivesItems) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43} +} + +func (x *PrimitivesItems) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PrimitivesItems) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *PrimitivesItems) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *PrimitivesItems) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *PrimitivesItems) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *PrimitivesItems) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *PrimitivesItems) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *PrimitivesItems) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *PrimitivesItems) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *PrimitivesItems) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *PrimitivesItems) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *PrimitivesItems) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *PrimitivesItems) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *PrimitivesItems) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *PrimitivesItems) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *PrimitivesItems) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *PrimitivesItems) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *PrimitivesItems) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Properties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Properties) Reset() { + *x = Properties{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Properties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Properties) ProtoMessage() {} + +func (x *Properties) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Properties.ProtoReflect.Descriptor instead. +func (*Properties) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44} +} + +func (x *Properties) GetAdditionalProperties() []*NamedSchema { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *QueryParameterSubSchema) Reset() { + *x = QueryParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameterSubSchema) ProtoMessage() {} + +func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead. +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45} +} + +func (x *QueryParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *QueryParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *QueryParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *QueryParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *QueryParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *QueryParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *QueryParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *QueryParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *QueryParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *QueryParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *QueryParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *QueryParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *QueryParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *QueryParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *QueryParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *QueryParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *QueryParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *QueryParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Response 
struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46} +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Response) GetSchema() *SchemaItem { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Response) GetHeaders() *Headers { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Response) GetExamples() *Examples { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Response) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// One or more JSON representations for responses +type ResponseDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ResponseDefinitions) Reset() { + *x = ResponseDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseDefinitions) ProtoMessage() {} + +func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead. 
+func (*ResponseDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47} +} + +func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ResponseValue) Reset() { + *x = ResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseValue) ProtoMessage() {} + +func (x *ResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead. +func (*ResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48} +} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ResponseValue) GetResponse() *Response { + if x, ok := x.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (x *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` +} + +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} + +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +// Response objects names can either be any valid HTTP status code or 'default'. 
+type Responses struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Responses) Reset() { + *x = Responses{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Responses) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Responses) ProtoMessage() {} + +func (x *Responses) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Responses.ProtoReflect.Descriptor instead. +func (*Responses) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49} +} + +func (x *Responses) GetResponseCode() []*NamedResponseValue { + if x != nil { + return x.ResponseCode + } + return nil +} + +func (x *Responses) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties int64 
`protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` + Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. 
+func (*Schema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50} +} + +func (x *Schema) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *Schema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Schema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Schema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *Schema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *Schema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *Schema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *Schema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *Schema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *Schema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *Schema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *Schema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Schema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *Schema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *Schema) GetMaxProperties() int64 { + if x != nil { + return x.MaxProperties + } + return 0 +} + +func (x *Schema) GetMinProperties() int64 { + if x != nil { + return x.MinProperties + } + return 0 +} + +func (x *Schema) GetRequired() []string { + if x != nil { + return x.Required + } + return nil +} + +func (x *Schema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +func (x *Schema) GetType() *TypeItem { + if x != nil { + return x.Type + } + return nil +} + +func (x *Schema) GetItems() *ItemsItem { + if x != nil { + return x.Items + } + return nil +} + +func (x *Schema) GetAllOf() []*Schema { + if x != nil { + return x.AllOf + } + return nil +} + +func (x *Schema) GetProperties() *Properties { + if x != nil { + return x.Properties + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *Schema) GetXml() *Xml { + if x != nil { + return x.Xml + } + return nil +} + +func (x *Schema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Schema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type SchemaItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func 
(x *SchemaItem) Reset() { + *x = SchemaItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemaItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaItem) ProtoMessage() {} + +func (x *SchemaItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead. +func (*SchemaItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51} +} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SchemaItem) GetSchema() *Schema { + if x, ok := x.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (x *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} + +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +type SecurityDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. 
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52} +} + +func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SecurityDefinitionsItem) Reset() { + *x = SecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitionsItem) ProtoMessage() {} + +func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead. +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53} +} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity 
`protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +type SecurityRequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. 
+func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54} +} + +func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type StringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` +} + +func (x *StringArray) Reset() { + *x = StringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringArray) ProtoMessage() {} + +func (x *StringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. +func (*StringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55} +} + +func (x *StringArray) GetValue() []string { + if x != nil { + return x.Value + } + return nil +} + +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Tag) Reset() { + *x = Tag{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
+func (*Tag) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56} +} + +func (x *Tag) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Tag) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type TypeItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` +} + +func (x *TypeItem) Reset() { + *x = TypeItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeItem) ProtoMessage() {} + +func (x *TypeItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead. +func (*TypeItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57} +} + +func (x *TypeItem) GetValue() []string { + if x != nil { + return x.Value + } + return nil +} + +// Any property starting with x- is valid. +type VendorExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *VendorExtension) Reset() { + *x = VendorExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VendorExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VendorExtension) ProtoMessage() {} + +func (x *VendorExtension) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead. 
+func (*VendorExtension) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58} +} + +func (x *VendorExtension) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Xml struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Xml) Reset() { + *x = Xml{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Xml) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Xml) ProtoMessage() {} + +func (x *Xml) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Xml.ProtoReflect.Descriptor instead. +func (*Xml) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59} +} + +func (x *Xml) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Xml) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Xml) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *Xml) GetAttribute() bool { + if x != nil { + return x.Attribute + } + return false +} + +func (x *Xml) GetWrapped() bool { + if x != nil { + return x.Wrapped + } + return false +} + +func (x *Xml) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor + +var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e, + 0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, + 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07, + 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 
0x6f, + 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69, + 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, + 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x06, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, + 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, + 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, + 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, + 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, + 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52, + 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a, + 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, + 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 
0x68, + 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 
0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, + 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 
0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, + 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, + 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, 0x29, 0x0a, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, + 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, + 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 
0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, + 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a, + 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07, + 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, + 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37, + 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5, + 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, + 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f, + 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f, + 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, + 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 
0x32, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a, + 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, + 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 
0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, + 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, + 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, + 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c, + 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, + 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 
0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x6f, 0x64, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x62, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a, + 0x12, 0x6e, 0x6f, 0x6e, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6e, 0x6f, 0x6e, 0x42, 0x6f, + 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x94, 0x01, + 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xcf, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, + 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x52, 0x65, 0x66, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, + 0x03, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x52, 0x04, 0x70, 0x6f, 0x73, + 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x05, + 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x05, 0x0a, 0x16, 0x50, 0x61, 0x74, 0x68, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, + 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, + 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, + 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, + 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, + 0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x77, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, + 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, + 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x92, 0x05, + 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x10, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa8, + 0x06, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 
0x0b, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2d, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 
0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x52, + 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x13, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x09, 0x0a, 0x06, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 
0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, + 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, + 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, 0x66, + 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, + 0x66, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x03, + 0x78, 0x6d, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, 0x12, + 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, + 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x1f, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, 0x0a, 
0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61, + 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b, + 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03, + 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 
0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70, + 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x76, 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, + 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once + file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc +) + +func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { + file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() { + file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData) + }) + return file_openapiv2_OpenAPIv2_proto_rawDescData +} + +var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ + (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem + (*Any)(nil), // 1: openapi.v2.Any + (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity + (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity + (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter + (*Contact)(nil), // 5: openapi.v2.Contact + 
(*Default)(nil), // 6: openapi.v2.Default + (*Definitions)(nil), // 7: openapi.v2.Definitions + (*Document)(nil), // 8: openapi.v2.Document + (*Examples)(nil), // 9: openapi.v2.Examples + (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs + (*FileSchema)(nil), // 11: openapi.v2.FileSchema + (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema + (*Header)(nil), // 13: openapi.v2.Header + (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema + (*Headers)(nil), // 15: openapi.v2.Headers + (*Info)(nil), // 16: openapi.v2.Info + (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem + (*JsonReference)(nil), // 18: openapi.v2.JsonReference + (*License)(nil), // 19: openapi.v2.License + (*NamedAny)(nil), // 20: openapi.v2.NamedAny + (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader + (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter + (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem + (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse + (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue + (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema + (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem + (*NamedString)(nil), // 28: openapi.v2.NamedString + (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray + (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter + (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity + (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity + (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity + (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity + (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes + (*Operation)(nil), // 36: openapi.v2.Operation + (*Parameter)(nil), // 37: openapi.v2.Parameter + (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions + (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem + (*PathItem)(nil), // 40: openapi.v2.PathItem + (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema + (*Paths)(nil), // 42: openapi.v2.Paths + (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems + (*Properties)(nil), // 44: openapi.v2.Properties + (*QueryParameterSubSchema)(nil), // 45: openapi.v2.QueryParameterSubSchema + (*Response)(nil), // 46: openapi.v2.Response + (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions + (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue + (*Responses)(nil), // 49: openapi.v2.Responses + (*Schema)(nil), // 50: openapi.v2.Schema + (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem + (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions + (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem + (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement + (*StringArray)(nil), // 55: openapi.v2.StringArray + (*Tag)(nil), // 56: openapi.v2.Tag + (*TypeItem)(nil), // 57: openapi.v2.TypeItem + (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension + (*Xml)(nil), // 59: openapi.v2.Xml + (*anypb.Any)(nil), // 60: google.protobuf.Any +} +var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{ + 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema + 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any + 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 50, 
// 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema + 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny + 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema + 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info + 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths + 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions + 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions + 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions + 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement + 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions + 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag + 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any + 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any + 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems + 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any + 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any + 20, // 32: openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader + 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact + 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License + 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema + 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any + 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header + 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter + 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem + 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response 
+ 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue + 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema + 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem + 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray + 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema + 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema + 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema + 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema + 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString + 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs + 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem + 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses + 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement + 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny + 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter + 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter + 22, // 72: openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter + 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter + 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference + 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation + 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation + 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation + 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation + 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation + 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation + 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation + 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem + 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 87: 
openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny + 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem + 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems + 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any + 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any + 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny + 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema + 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem + 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers + 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples + 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny + 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse + 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response + 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference + 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue + 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any + 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any + 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem + 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem + 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem + 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema + 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties + 59, // 115: openapi.v2.Schema.xml:type_name -> openapi.v2.Xml + 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any + 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema + 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema + 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem + 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity + 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity + 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity + 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity + 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity + 31, // 127: 
openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity + 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray + 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny + 133, // [133:133] is the sub-list for method output_type + 133, // [133:133] is the sub-list for method input_type + 133, // [133:133] is the sub-list for extension type_name + 133, // [133:133] is the sub-list for extension extendee + 0, // [0:133] is the sub-list for field type_name +} + +func init() { file_openapiv2_OpenAPIv2_proto_init() } +func file_openapiv2_OpenAPIv2_proto_init() { + if File_openapiv2_OpenAPIv2_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalPropertiesItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiKeySecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BasicAuthenticationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Contact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Default); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Definitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Examples); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalDocs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FormDataParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Headers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JsonReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*License); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedPathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedStringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonBodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2AccessCodeSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ApplicationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ImplicitSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2PasswordSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2Scopes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParameterDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParametersItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrimitivesItems); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Properties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Responses); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VendorExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Xml); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = 
[]interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, + NumEnums: 0, + NumMessages: 60, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, + DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, + MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, + }.Build() + File_openapiv2_OpenAPIv2_proto = out.File + file_openapiv2_OpenAPIv2_proto_rawDesc = nil + file_openapiv2_OpenAPIv2_proto_goTypes = nil + file_openapiv2_OpenAPIv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto new file mode 100644 index 00000000..1c59b2f4 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto @@ -0,0 +1,666 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. 
+option go_package = "./openapiv2;openapi_v2"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for responses +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/vendor/github.com/google/gnostic-models/openapiv2/README.md b/vendor/github.com/google/gnostic-models/openapiv2/README.md new file mode 100644 index 00000000..5276128d --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv2/README.md @@ -0,0 +1,14 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model and related code for +supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol +Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into +the Protocol Buffer-based datastructures generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler +generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer +compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/google/gnostic-models/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go new file mode 100644 index 00000000..e96ac0d6 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go @@ -0,0 +1,42 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapi_v2 + +import ( + "gopkg.in/yaml.v3" + + "github.com/google/gnostic-models/compiler" +) + +// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. +func ParseDocument(b []byte) (*Document, error) { + info, err := compiler.ReadInfoFromBytes("", b) + if err != nil { + return nil, err + } + root := info.Content[0] + return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil)) +} + +// YAMLValue produces a serialized YAML representation of the document. 
+func (d *Document) YAMLValue(comment string) ([]byte, error) { + rawInfo := d.ToRawInfo() + rawInfo = &yaml.Node{ + Kind: yaml.DocumentNode, + Content: []*yaml.Node{rawInfo}, + HeadComment: comment, + } + return yaml.Marshal(rawInfo) +} diff --git a/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json b/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json new file mode 100644 index 00000000..afa12b79 --- /dev/null +++ b/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." 
+ }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go rename to 
vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go index d54a84db..4b1131ce 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go @@ -23,7 +23,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). @@ -8560,12 +8560,7 @@ func (m *Strings) ToRawInfo() *yaml.Node { if m == nil { return info } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) - } - } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} return info } diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go index 90a56f55..945b8d11 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.27.1 +// protoc v3.19.3 // source: openapiv3/OpenAPIv3.proto package openapi_v3 @@ -6760,13 +6760,12 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, - 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto similarity index 99% rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto index 7aede5ed..1be335b8 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option 
objc_class_prefix = "OAS"; // The Go package name. -option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; +option go_package = "./openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic/openapiv3/README.md b/vendor/github.com/google/gnostic-models/openapiv3/README.md similarity index 89% rename from vendor/github.com/google/gnostic/openapiv3/README.md rename to vendor/github.com/google/gnostic-models/openapiv3/README.md index 83603b82..5ee12d92 100644 --- a/vendor/github.com/google/gnostic/openapiv3/README.md +++ b/vendor/github.com/google/gnostic-models/openapiv3/README.md @@ -19,7 +19,3 @@ for OpenAPI. The schema-generator directory contains support code which generates openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). - -### How to rebuild - -`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto` \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go similarity index 96% rename from vendor/github.com/google/gnostic/openapiv3/document.go rename to vendor/github.com/google/gnostic-models/openapiv3/document.go index ef10d1d9..1cee4677 100644 --- a/vendor/github.com/google/gnostic/openapiv3/document.go +++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go @@ -17,7 +17,7 @@ package openapi_v3 import ( "gopkg.in/yaml.v3" - "github.com/google/gnostic/compiler" + "github.com/google/gnostic-models/compiler" ) // ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation. diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go deleted file mode 100644 index ae242f30..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2022 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: openapiv3/annotations.proto - -package openapi_v3 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FileOptions)(nil), - ExtensionType: (*Document)(nil), - Field: 1143, - Name: "openapi.v3.document", - Tag: "bytes,1143,opt,name=document", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.MethodOptions)(nil), - ExtensionType: (*Operation)(nil), - Field: 1143, - Name: "openapi.v3.operation", - Tag: "bytes,1143,opt,name=operation", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.MessageOptions)(nil), - ExtensionType: (*Schema)(nil), - Field: 1143, - Name: "openapi.v3.schema", - Tag: "bytes,1143,opt,name=schema", - Filename: "openapiv3/annotations.proto", - }, - { - ExtendedType: (*descriptorpb.FieldOptions)(nil), - ExtensionType: (*Schema)(nil), - Field: 1143, - Name: "openapi.v3.property", - Tag: "bytes,1143,opt,name=property", - Filename: "openapiv3/annotations.proto", - }, -} - -// Extension fields to descriptorpb.FileOptions. -var ( - // optional openapi.v3.Document document = 1143; - E_Document = &file_openapiv3_annotations_proto_extTypes[0] -) - -// Extension fields to descriptorpb.MethodOptions. -var ( - // optional openapi.v3.Operation operation = 1143; - E_Operation = &file_openapiv3_annotations_proto_extTypes[1] -) - -// Extension fields to descriptorpb.MessageOptions. -var ( - // optional openapi.v3.Schema schema = 1143; - E_Schema = &file_openapiv3_annotations_proto_extTypes[2] -) - -// Extension fields to descriptorpb.FieldOptions. -var ( - // optional openapi.v3.Schema property = 1143; - E_Property = &file_openapiv3_annotations_proto_extTypes[3] -) - -var File_openapiv3_annotations_proto protoreflect.FileDescriptor - -var file_openapiv3_annotations_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, - 0x06, 0x73, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f, - 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, - 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_openapiv3_annotations_proto_goTypes = []interface{}{ - (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions - (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions - (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions - (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions - (*Document)(nil), // 4: openapi.v3.Document - (*Operation)(nil), // 5: openapi.v3.Operation - (*Schema)(nil), // 6: openapi.v3.Schema -} -var file_openapiv3_annotations_proto_depIdxs = []int32{ - 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions - 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions - 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions - 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions - 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document - 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation - 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema - 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 4, // [4:8] is the sub-list for extension type_name - 0, // [0:4] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_openapiv3_annotations_proto_init() } -func file_openapiv3_annotations_proto_init() { - if File_openapiv3_annotations_proto != nil { - return - } - file_openapiv3_OpenAPIv3_proto_init() - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_openapiv3_annotations_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 4, - NumServices: 0, - }, - GoTypes: file_openapiv3_annotations_proto_goTypes, - DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, - ExtensionInfos: file_openapiv3_annotations_proto_extTypes, - }.Build() - 
File_openapiv3_annotations_proto = out.File - file_openapiv3_annotations_proto_rawDesc = nil - file_openapiv3_annotations_proto_goTypes = nil - file_openapiv3_annotations_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.proto b/vendor/github.com/google/gnostic/openapiv3/annotations.proto deleted file mode 100644 index 0bd87810..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/annotations.proto +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2022 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package openapi.v3; - -import "openapiv3/OpenAPIv3.proto"; -import "google/protobuf/descriptor.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "AnnotationsProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v3"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -// The Go package name. 
-option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; - -extend google.protobuf.FileOptions { - Document document = 1143; -} - -extend google.protobuf.MethodOptions { - Operation operation = 1143; -} - -extend google.protobuf.MessageOptions { - Schema schema = 1143; -} - -extend google.protobuf.FieldOptions { - Schema property = 1143; -} \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json deleted file mode 100644 index d5caed16..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json +++ /dev/null @@ -1,1251 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anysOrExpressions" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. 
When a list of Security Requirement Objects is defined on the Open API object or Operation Object, only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": false, - "patternProperties": { - "^[a-zA-Z0-9\\.\\-_]+$": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "anysOrExpressions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/anyOrExpression" - } - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "object": { - "type": "object", - "additionalProperties": 
true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json deleted file mode 100644 index ed0b83ad..00000000 --- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json +++ /dev/null @@ -1,1250 +0,0 @@ -{ - "title": "A JSON Schema for OpenAPI 3.0.", - "id": "http://openapis.org/v3/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "description": "This is the root document object of the OpenAPI document.", - "required": [ - "openapi", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "openapi": { - "type": "string" - }, - "info": { - "$ref": "#/definitions/info" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "components": { - "$ref": "#/definitions/components" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "The object provides metadata about the API. 
The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.", - "required": [ - "title", - "version" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "termsOfService": { - "type": "string" - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - }, - "version": { - "type": "string" - }, - "summary": { - "type": "string" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the exposed API.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - }, - "email": { - "type": "string", - "format": "email" - } - } - }, - "license": { - "type": "object", - "description": "License information for the exposed API.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "server": { - "type": "object", - "description": "An object representing a Server.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "url": { - "type": "string" - }, - "description": { - "type": "string" - }, - "variables": { - "$ref": "#/definitions/serverVariables" - } - } - }, - "serverVariable": { - "type": "object", - "description": "An object representing a Server Variable for server URL template substitution.", - "required": [ - "default" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "enum": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "default": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "components": { - "type": "object", - "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schemas": { - "$ref": "#/definitions/schemasOrReferences" - }, - "responses": { - "$ref": "#/definitions/responsesOrReferences" - }, - "parameters": { - "$ref": "#/definitions/parametersOrReferences" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "requestBodies": { - "$ref": "#/definitions/requestBodiesOrReferences" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "securitySchemes": { - "$ref": "#/definitions/securitySchemesOrReferences" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - } - } - }, - "paths": { - "type": "object", - "description": "Holds the relative paths to the individual endpoints and their operations. 
The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.", - "additionalProperties": false, - "patternProperties": { - "^/": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "pathItem": { - "type": "object", - "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "trace": { - "$ref": "#/definitions/operation" - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - } - } - }, - "operation": { - "type": "object", - "description": "Describes a single API operation on a path.", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/definitions/parameterOrReference" - }, - "uniqueItems": true - }, - "requestBody": { - "$ref": "#/definitions/requestBodyOrReference" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "callbacks": { - "$ref": "#/definitions/callbacksOrReferences" - }, - "deprecated": { - "type": "boolean" - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "servers": { - "type": "array", - "items": { - "$ref": "#/definitions/server" - }, - "uniqueItems": true - } - } - }, - "externalDocs": { - "type": "object", - "description": "Allows referencing an external resource for extended documentation.", - "required": [ - "url" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string" - } - } - }, - "parameter": { - "type": "object", - "description": "Describes a single operation parameter. 
A unique parameter is defined by a combination of a name and location.", - "required": [ - "name", - "in" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "requestBody": { - "type": "object", - "description": "Describes a single request body.", - "required": [ - "content" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "required": { - "type": "boolean" - } - } - }, - "mediaType": { - "type": "object", - "description": "Each Media Type Object provides schema and examples for the media type identified by its key.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "encoding": { - "$ref": "#/definitions/encodings" - } - } - }, - "encoding": { - "type": "object", - "description": "A single encoding definition applied to a single schema property.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "contentType": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - } - } - }, - "responses": { - "type": "object", - "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. 
The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.", - "additionalProperties": false, - "patternProperties": { - "^([0-9X]{3})$": { - "$ref": "#/definitions/responseOrReference" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "default": { - "$ref": "#/definitions/responseOrReference" - } - } - }, - "response": { - "type": "object", - "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.", - "required": [ - "description" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "headers": { - "$ref": "#/definitions/headersOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - }, - "links": { - "$ref": "#/definitions/linksOrReferences" - } - } - }, - "callback": { - "type": "object", - "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.", - "additionalProperties": false, - "patternProperties": { - "^": { - "$ref": "#/definitions/pathItem" - }, - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - } - }, - "example": { - "type": "object", - "description": "", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "summary": { - "type": "string" - }, - "description": { - "type": "string" - }, - "value": { - "$ref": "#/definitions/any" - }, - "externalValue": { - "type": "string" - } - } - }, - "link": { - "type": "object", - "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "operationRef": { - "type": "string" - }, - "operationId": { - "type": "string" - }, - "parameters": { - "$ref": "#/definitions/anyOrExpression" - }, - "requestBody": { - "$ref": "#/definitions/anyOrExpression" - }, - "description": { - "type": "string" - }, - "server": { - "$ref": "#/definitions/server" - } - } - }, - "header": { - "type": "object", - "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. 
All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "description": { - "type": "string" - }, - "required": { - "type": "boolean" - }, - "deprecated": { - "type": "boolean" - }, - "allowEmptyValue": { - "type": "boolean" - }, - "style": { - "type": "string" - }, - "explode": { - "type": "boolean" - }, - "allowReserved": { - "type": "boolean" - }, - "schema": { - "$ref": "#/definitions/schemaOrReference" - }, - "example": { - "$ref": "#/definitions/any" - }, - "examples": { - "$ref": "#/definitions/examplesOrReferences" - }, - "content": { - "$ref": "#/definitions/mediaTypes" - } - } - }, - "tag": { - "type": "object", - "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.", - "required": [ - "name" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - } - }, - "reference": { - "type": "object", - "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "summary": { - "type": "string" - }, - "description": { - "type": "string" - } - } - }, - "schema": { - "type": "object", - "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. 
Unless stated otherwise, the property definitions follow the JSON Schema.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "nullable": { - "type": "boolean" - }, - "discriminator": { - "$ref": "#/definitions/discriminator" - }, - "readOnly": { - "type": "boolean" - }, - "writeOnly": { - "type": "boolean" - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": { - "$ref": "#/definitions/any" - }, - "deprecated": { - "type": "boolean" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/required" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "type": { - "type": "string" - }, - "allOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "oneOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "anyOf": { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - }, - "not": { - "$ref": "#/definitions/schema" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "array", - "items": { - "$ref": "#/definitions/schemaOrReference" - }, - "minItems": 1 - } - ] - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schemaOrReference" - }, - { - "type": "boolean" - } - ] - }, - "default": { - "$ref": "#/definitions/defaultType" - }, - "description": { - "type": "string" - }, - "format": { - "type": "string" - } - } - }, - "discriminator": { - "type": "object", - "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. 
The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.", - "required": [ - "propertyName" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "propertyName": { - "type": "string" - }, - "mapping": { - "$ref": "#/definitions/strings" - } - } - }, - "xml": { - "type": "object", - "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean" - }, - "wrapped": { - "type": "boolean" - } - } - }, - "securityScheme": { - "type": "object", - "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE.", - "required": [ - "type" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "type": { - "type": "string" - }, - "description": { - "type": "string" - }, - "name": { - "type": "string" - }, - "in": { - "type": "string" - }, - "scheme": { - "type": "string" - }, - "bearerFormat": { - "type": "string" - }, - "flows": { - "$ref": "#/definitions/oauthFlows" - }, - "openIdConnectUrl": { - "type": "string" - } - } - }, - "oauthFlows": { - "type": "object", - "description": "Allows configuration of the supported OAuth Flows.", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "implicit": { - "$ref": "#/definitions/oauthFlow" - }, - "password": { - "$ref": "#/definitions/oauthFlow" - }, - "clientCredentials": { - "$ref": "#/definitions/oauthFlow" - }, - "authorizationCode": { - "$ref": "#/definitions/oauthFlow" - } - } - }, - "oauthFlow": { - "type": "object", - "description": "Configuration details for a supported OAuth Flow", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/specificationExtension" - } - }, - "properties": { - "authorizationUrl": { - "type": "string" - }, - "tokenUrl": { - "type": "string" - }, - "refreshUrl": { - "type": "string" - }, - "scopes": { - "$ref": "#/definitions/strings" - } - } - }, - "securityRequirement": { - "type": "object", - "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. 
Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "anyOrExpression": { - "oneOf": [ - { - "$ref": "#/definitions/any" - }, - { - "$ref": "#/definitions/expression" - } - ] - }, - "callbackOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/callback" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "exampleOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/example" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "headerOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/header" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "linkOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/link" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "parameterOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "requestBodyOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/requestBody" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "responseOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "schemaOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "securitySchemeOrReference": { - "oneOf": [ - { - "$ref": "#/definitions/securityScheme" - }, - { - "$ref": "#/definitions/reference" - } - ] - }, - "callbacksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/callbackOrReference" - } - }, - "encodings": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/encoding" - } - }, - "examplesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/exampleOrReference" - } - }, - "headersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/headerOrReference" - } - }, - "linksOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/linkOrReference" - } - }, - "mediaTypes": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/mediaType" - } - }, - "parametersOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameterOrReference" - } - }, - "requestBodiesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/requestBodyOrReference" - } - }, - "responsesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/responseOrReference" - } - }, - "schemasOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schemaOrReference" - } - }, - "securitySchemesOrReferences": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/securitySchemeOrReference" - } - }, - "serverVariables": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/serverVariable" - } - }, - "strings": { - "type": "object", - "additionalProperties": { - "type": 
"string" - } - }, - "object": { - "type": "object", - "additionalProperties": true - }, - "any": { - "additionalProperties": true - }, - "expression": { - "type": "object", - "additionalProperties": true - }, - "specificationExtension": { - "description": "Any property starting with x- is valid.", - "oneOf": [ - { - "type": "null" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - }, - { - "type": "object" - }, - { - "type": "array" - } - ] - }, - "defaultType": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "array" - }, - { - "type": "object" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "string" - } - ] - } - } -} diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index c8a1beb8..860bb304 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -258,10 +258,10 @@ func (p *Profile) postDecode() error { // If this a main linux kernel mapping with a relocation symbol suffix // ("[kernel.kallsyms]_text"), extract said suffix. // It is fairly hacky to handle at this level, but the alternatives appear even worse. - if strings.HasPrefix(m.File, "[kernel.kallsyms]") { - m.KernelRelocationSymbol = strings.ReplaceAll(m.File, "[kernel.kallsyms]", "") + const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] } - } functions := make(map[uint64]*Function, len(p.Function)) @@ -530,6 +530,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -538,6 +539,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53..4580bab1 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. 
- if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 4b66282c..eee0132e 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -326,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 4ec00fe7..5551eb0b 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -72,9 +72,23 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - Label map[string][]string + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. NumLabel map[string][]int64 - NumUnit map[string][]string + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -131,6 +145,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ -422,7 +437,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. 
-func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -444,7 +459,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -452,6 +467,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -613,10 +634,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" @@ -715,6 +737,35 @@ func (s *Sample) HasLabel(key, value string) bool { return false } +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. func (s *Sample) DiffBaseSample() bool { @@ -796,7 +847,7 @@ func (p *Profile) HasFileLines() bool { // "[vdso]", [vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. 
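The pprof hunks above widen `Profile.Aggregate` with a new `columnnumber` flag and add the `SetNumLabel`/`RemoveNumLabel` helpers. A minimal sketch of how a caller might adapt to the updated API, assuming a local profile file named `cpu.pb.gz` and a `"bytes"` label key (both illustrative, not taken from this repository):

```go
package main

import (
	"log"
	"os"

	"github.com/google/pprof/profile"
)

func main() {
	// Hypothetical profile path; any serialized pprof profile works here.
	f, err := os.Open("cpu.pb.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	p, err := profile.Parse(f)
	if err != nil {
		log.Fatal(err)
	}

	// New six-argument signature: keep inline frames, function names,
	// filenames and line numbers, but drop column numbers and addresses.
	if err := p.Aggregate(true, true, true, true, false, false); err != nil {
		log.Fatal(err)
	}

	// Tag every sample with a numeric label using the newly added helper;
	// the key and unit are illustrative values.
	p.SetNumLabel("bytes", []int64{4096}, []string{"bytes"})
}
```

Existing callers of the old five-argument `Aggregate` need the extra boolean, as the javaCPUProfile and parseJavaProfile hunks above show.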
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829d..7ec5ac7e 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4be..dc60082d 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb..3167b643 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. 
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index ec91408f..76577dc7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,126 @@ +## 2.19.0 + +### Features + +[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering. + +## 2.18.0 + +### Features +- Add --slience-skips and --force-newlines [f010b65] +- fail when no tests were run and --fail-on-empty was set [d80eebe] + +### Fixes +- Fix table entry context edge case [42013d6] + +### Maintenance +- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7] +- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd] +- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7] + +## 2.17.3 + +### Fixes +`ginkgo watch` now ignores hidden files [bde6e00] + +## 2.17.2 + +### Fixes +- fix: close files [32259c8] +- fix github output log level for skipped specs [780e7a3] + +### Maintenance +- Bump github.com/google/pprof [d91fe4e] +- Bump github.com/go-task/slim-sprig to v3 [8cb662e] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422] +- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4] +- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8] +- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4] +- Fix test for gomega version bump [f2fcd97] +- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2] +- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26] +- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170] +- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a] + +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + +## 2.16.0 + +### Features +- add SpecContext to reporting nodes + +### Fixes +- merge coverages instead of combining them (#1329) (#1340) [23f0cc5] +- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7] + +### Maintenance +- docs/index.md: Typo [2cebe8d] +- fix docs [06de431] +- chore: test with Go 1.22 (#1352) [898cba9] +- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120] +- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed] +- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69] +- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d] +- Fix docs for handling failures in goroutines (#1339) [4471b2e] + +## 2.15.0 + +### Features + +- JUnit reports now interpret Label(owner:X) and set owner to X. 
[8f3bd70] +- include cancellation reason when cancelling spec context [96e915c] + +### Fixes + +- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09] +- fix outline when using nodot in ginkgo v2 [dca77c8] +- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f] +- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14] + +### Maintenance + +- Bump to go 1.20 [4fcd0b3] + +## 2.14.0 + +### Features +You can now use `GinkgoTB()` when you need an instance of `testing.TB` to pass to a library. + +Prior to this release table testing only supported generating individual `It`s for each test entry. `DescribeTableSubtree` extends table testing support to entire testing subtrees - under the hood `DescrieTableSubtree` generates a new container for each entry and invokes your function to fill our the container. See the [docs](https://onsi.github.io/ginkgo/#generating-subtree-tables) to learn more. + +- Introduce DescribeTableSubtree [65ec56d] +- add GinkgoTB() to docs [4a2c832] +- Add GinkgoTB() function (#1333) [92b6744] + +### Fixes +- Fix typo in internal/suite.go (#1332) [beb9507] +- Fix typo in docs/index.md (#1319) [4ac3a13] +- allow wasm to compile with ginkgo present (#1311) [b2e5bc5] + +### Maintenance +- Bump golang.org/x/tools from 0.16.0 to 0.16.1 (#1316) [465a8ec] +- Bump actions/setup-go from 4 to 5 (#1313) [eab0e40] +- Bump github/codeql-action from 2 to 3 (#1317) [fbf9724] +- Bump golang.org/x/crypto (#1318) [3ee80ee] +- Bump golang.org/x/tools from 0.14.0 to 0.16.0 (#1306) [123e1d5] +- Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#1297) [558f6e0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#1307) [84ff7f3] + ## 2.13.2 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 1da92fe7..80de566a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -6,8 +6,10 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - Ensure adequate test coverage: - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder). - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. -- Make sure all the tests succeed via `ginkgo -r -p` -- Vet your changes via `go vet ./...` -- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. +- Run `make` or: + - Install ginkgo locally via `go install ./...` + - Make sure all the tests succeed via `ginkgo -r -p` + - Vet your changes via `go vet ./...` +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes. -Thanks for supporting Ginkgo! \ No newline at end of file +Thanks for supporting Ginkgo! diff --git a/vendor/github.com/onsi/ginkgo/v2/Makefile b/vendor/github.com/onsi/ginkgo/v2/Makefile new file mode 100644 index 00000000..cb099aff --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/Makefile @@ -0,0 +1,11 @@ +# default task since it's first +.PHONY: all +all: vet test + +.PHONY: test +test: + go run github.com/onsi/ginkgo/v2/ginkgo -r -p + +.PHONY: vet +vet: + go vet ./... 
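The 2.14.0 notes above introduce `GinkgoTB()` for code that needs a real `testing.TB`. A minimal, hypothetical sketch of that usage (the `requireTB` helper and the package name are assumptions, and the usual suite bootstrap is assumed to exist elsewhere):

```go
package thing_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
)

// requireTB stands in for any third-party helper that accepts testing.TB.
func requireTB(tb testing.TB) {
	tb.Helper()
	tb.Log("received a testing.TB-compatible value")
}

var _ = Describe("GinkgoTB", func() {
	It("satisfies libraries that require testing.TB", func() {
		// GinkgoTB() returns a wrapper that embeds testing.TB and forwards
		// its methods to GinkgoT(), so it can be passed wherever the
		// concrete interface is demanded.
		requireTB(GinkgoTB())
	})
})
```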
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 2d7a70ec..a3e8237e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels { return suiteLabels } +func getwd() (string, error) { + if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") { + // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache + // is shared between two different directories with the same test code. + return os.Getwd() + } + return "", nil +} + /* PreviewSpecs walks the testing tree and produces a report without actually invoking the specs. See http://onsi.github.io/ginkgo/#previewing-specs for more information. @@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report { err = global.Suite.BuildTree() exitIfErr(err) - suitePath, err := os.Getwd() + suitePath, err := getwd() exitIfErr(err) suitePath, err = filepath.Abs(suitePath) exitIfErr(err) @@ -783,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7..b2dc59be 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec9..cf3b7cb6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go new file mode 100644 index 00000000..3c5079ff --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go @@ -0,0 +1,129 @@ +// Copyright (c) 2015, Wade Simmons +// All rights reserved. 
+ +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: + +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gocovmerge takes the results from multiple `go test -coverprofile` +// runs and merges them into one profile + +// this file was originally taken from the gocovmerge project +// see also: https://go.shabbyrobe.org/gocovmerge +package internal + +import ( + "fmt" + "io" + "sort" + + "golang.org/x/tools/cover" +) + +func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile { + i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName }) + if i < len(profiles) && profiles[i].FileName == p.FileName { + MergeCoverProfiles(profiles[i], p) + } else { + profiles = append(profiles, nil) + copy(profiles[i+1:], profiles[i:]) + profiles[i] = p + } + return profiles +} + +func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error { + if len(profiles) == 0 { + return nil + } + if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil { + return err + } + for _, p := range profiles { + for _, b := range p.Blocks { + if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil { + return err + } + } + } + return nil +} + +func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error { + if into.Mode != merge.Mode { + return fmt.Errorf("cannot merge profiles with different modes") + } + // Since the blocks are sorted, we can keep track of where the last block + // was inserted and only look at the blocks after that as targets for merge + startIndex := 0 + for _, b := range merge.Blocks { + var err error + startIndex, err = mergeProfileBlock(into, b, startIndex) + if err != nil { + return err + } + } + return nil +} + +func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) { + sortFunc := func(i int) bool { + pi := p.Blocks[i+startIndex] + return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol) + } + + i := 0 + if sortFunc(i) != true { + i = sort.Search(len(p.Blocks)-startIndex, sortFunc) + } + + i += startIndex + if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol { + if p.Blocks[i].EndLine != 
pb.EndLine || p.Blocks[i].EndCol != pb.EndCol { + return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb) + } + switch p.Mode { + case "set": + p.Blocks[i].Count |= pb.Count + case "count", "atomic": + p.Blocks[i].Count += pb.Count + default: + return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode) + } + + } else { + if i > 0 { + pa := p.Blocks[i-1] + if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) { + return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb) + } + } + if i < len(p.Blocks)-1 { + pa := p.Blocks[i+1] + if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) { + return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb) + } + } + p.Blocks = append(p.Blocks, cover.ProfileBlock{}) + copy(p.Blocks[i+1:], p.Blocks[i:]) + p.Blocks[i] = pb + } + + return i + 1, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index bd3c6d02..8e16d2bb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -1,7 +1,6 @@ package internal import ( - "bytes" "fmt" "os" "os/exec" @@ -12,6 +11,7 @@ import ( "github.com/google/pprof/profile" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/cover" ) func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string { @@ -144,38 +144,27 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC return messages, nil } -//loads each profile, combines them, deletes them, stores them in destination +// loads each profile, merges them, deletes them, stores them in destination func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { - combined := &bytes.Buffer{} - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for i, profile := range profiles { - contents, err := os.ReadFile(profile) + var merged []*cover.Profile + for _, file := range profiles { + parsedProfiles, err := cover.ParseProfiles(file) if err != nil { - return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + return err } - os.Remove(profile) - - // remove the cover mode line from every file - // except the first one - if i > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. 
- if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + os.Remove(file) + for _, p := range parsedProfiles { + merged = AddCoverProfile(merged, p) } } - - err := os.WriteFile(destination, combined.Bytes(), 0666) + dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer dst.Close() + err = DumpCoverProfiles(merged, dst) if err != nil { - return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + return err } return nil } @@ -184,7 +173,7 @@ func GetCoverageFromCoverProfile(profile string) (float64, error) { cmd := exec.Command("go", "tool", "cover", "-func", profile) output, err := cmd.CombinedOutput() if err != nil { - return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output)) } re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) matches := re.FindStringSubmatch(string(output)) @@ -208,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 958daccb..5d8d00bb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -1,10 +1,11 @@ package outline import ( - "github.com/onsi/ginkgo/v2/types" "go/ast" "go/token" "strconv" + + "github.com/onsi/ginkgo/v2/types" ) const ( diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go index 67ec5ab7..f0a6b5d2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -28,14 +28,7 @@ func packageNameForImport(f *ast.File, path string) *string { } name := spec.Name.String() if name == "" { - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } + name = "ginkgo" } if name == "." 
{ name = "" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go index 17d052bd..0e6ae1f2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strings" "time" ) @@ -79,6 +80,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti continue } + if isHiddenFile(info) { + continue + } + if goTestRegExp.MatchString(info.Name()) { testHash += p.hashForFileInfo(info) if info.ModTime().After(testModifiedTime) { @@ -103,6 +108,10 @@ func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Ti return } +func isHiddenFile(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_") +} + func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 28447ffd..02c6739e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -1,7 +1,10 @@ package ginkgo import ( + "testing" + "github.com/onsi/ginkgo/v2/internal/testingtproxy" + "github.com/onsi/ginkgo/v2/types" ) /* @@ -12,10 +15,15 @@ GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's GinkgoT() takes an optional offset argument that can be used to get the correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following: + +- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf. +- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model. 
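A minimal sketch of the semantics documented just above (package and spec names are invented): Error/Errorf abort the spec just like Fatal/Fatalf, and Parallel() is a no-op.

```go
package docs_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("GinkgoT semantics", func() {
	It("treats Error like Fatal", func() {
		t := GinkgoT()
		t.Parallel() // no-op: Ginkgo parallelizes across processes, not goroutines
		t.Log("log output goes to the GinkgoWriter")
		// t.Errorf("boom") would abort this spec immediately, exactly like t.Fatalf.
	})
})
```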
+ You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { - offset := 3 + offset := 1 if len(optionalOffset) > 0 { offset = optionalOffset[0] } @@ -41,21 +49,21 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the type GinkgoTInterface interface { Cleanup(func()) Setenv(kev, value string) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) + Error(args ...any) + Errorf(format string, args ...any) Fail() FailNow() Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) Helper() - Log(args ...interface{}) - Logf(format string, args ...interface{}) + Log(args ...any) + Logf(format string, args ...any) Name() string Parallel() - Skip(args ...interface{}) + Skip(args ...any) SkipNow() - Skipf(format string, args ...interface{}) + Skipf(format string, args ...any) Skipped() bool TempDir() string } @@ -71,9 +79,9 @@ type FullGinkgoTInterface interface { AddReportEntryVisibilityNever(name string, args ...any) //Prints to the GinkgoWriter - Print(a ...interface{}) - Printf(format string, a ...interface{}) - Println(a ...interface{}) + Print(a ...any) + Printf(format string, a ...any) + Println(a ...any) //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo F(format string, args ...any) string @@ -92,3 +100,81 @@ type FullGinkgoTInterface interface { AttachProgressReporter(func() string) func() } + +/* +GinkgoTB() implements a wrapper that exactly matches the testing.TB interface. + +In go 1.18 a new private() function was added to the testing.TB interface. Any function which accepts testing.TB as input needs to be passed in something that directly implements testing.TB. + +This wrapper satisfies the testing.TB interface and intended to be used as a drop-in replacement with third party libraries that accept testing.TB. + +Similar to GinkgoT(), GinkgoTB() takes an optional offset argument that can be used to get the +correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately +*/ +func GinkgoTB(optionalOffset ...int) *GinkgoTBWrapper { + offset := 2 + if len(optionalOffset) > 0 { + offset = optionalOffset[0] + } + return &GinkgoTBWrapper{GinkgoT: GinkgoT(offset)} +} + +type GinkgoTBWrapper struct { + testing.TB + GinkgoT FullGinkgoTInterface +} + +func (g *GinkgoTBWrapper) Cleanup(f func()) { + g.GinkgoT.Cleanup(f) +} +func (g *GinkgoTBWrapper) Error(args ...any) { + g.GinkgoT.Error(args...) +} +func (g *GinkgoTBWrapper) Errorf(format string, args ...any) { + g.GinkgoT.Errorf(format, args...) +} +func (g *GinkgoTBWrapper) Fail() { + g.GinkgoT.Fail() +} +func (g *GinkgoTBWrapper) FailNow() { + g.GinkgoT.FailNow() +} +func (g *GinkgoTBWrapper) Failed() bool { + return g.GinkgoT.Failed() +} +func (g *GinkgoTBWrapper) Fatal(args ...any) { + g.GinkgoT.Fatal(args...) +} +func (g *GinkgoTBWrapper) Fatalf(format string, args ...any) { + g.GinkgoT.Fatalf(format, args...) +} +func (g *GinkgoTBWrapper) Helper() { + types.MarkAsHelper(1) +} +func (g *GinkgoTBWrapper) Log(args ...any) { + g.GinkgoT.Log(args...) +} +func (g *GinkgoTBWrapper) Logf(format string, args ...any) { + g.GinkgoT.Logf(format, args...) 
+} +func (g *GinkgoTBWrapper) Name() string { + return g.GinkgoT.Name() +} +func (g *GinkgoTBWrapper) Setenv(key, value string) { + g.GinkgoT.Setenv(key, value) +} +func (g *GinkgoTBWrapper) Skip(args ...any) { + g.GinkgoT.Skip(args...) +} +func (g *GinkgoTBWrapper) SkipNow() { + g.GinkgoT.SkipNow() +} +func (g *GinkgoTBWrapper) Skipf(format string, args ...any) { + g.GinkgoT.Skipf(format, args...) +} +func (g *GinkgoTBWrapper) Skipped() bool { + return g.GinkgoT.Skipped() +} +func (g *GinkgoTBWrapper) TempDir() string { + return g.GinkgoT.TempDir() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 16f0dc22..6a15f19a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -5,9 +5,8 @@ import ( "fmt" "reflect" "sort" - "time" - "sync" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0) var _global_id_mutex = &sync.Mutex{} func UniqueNodeID() uint { - //There's a reace in the internal integration tests if we don't make - //accessing _global_node_id_counter safe across goroutines. + // There's a reace in the internal integration tests if we don't make + // accessing _global_node_id_counter safe across goroutines. _global_id_mutex.Lock() defer _global_id_mutex.Unlock() _global_node_id_counter += 1 @@ -44,8 +43,8 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportSuiteBody func(types.Report) + ReportEachBody func(SpecContext, types.SpecReport) + ReportSuiteBody func(SpecContext, types.Report) MarkedFocus bool MarkedPending bool @@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy args = unrollInterfaceSlice(args) remainingArgs := []interface{}{} - //First get the CodeLocation up-to-date + // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { case Offset: @@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError := false args = remainingArgs remainingArgs = []interface{}{} - //now process the rest of the args + // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): - break //ignore deprecated timeouts + break // ignore deprecated timeouts case t == reflect.TypeOf(Focus): node.MarkedFocus = bool(arg.(focusType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy node.Body = func(SpecContext) { body() } } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { if node.ReportEachBody == nil { - node.ReportEachBody = arg.(func(types.SpecReport)) + if fn, ok := arg.(func(types.SpecReport)); ok { + node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) } + } else { + node.ReportEachBody = arg.(func(SpecContext, types.SpecReport)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { if node.ReportSuiteBody == nil { - node.ReportSuiteBody = 
arg.(func(types.Report)) + if fn, ok := arg.(func(types.Report)); ok { + node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) } + } else { + node.ReportSuiteBody = arg.(func(SpecContext, types.Report)) + node.HasContext = true + } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } - //validations + // validations if node.MarkedPending && node.MarkedFocus { appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go new file mode 100644 index 00000000..4c374935 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go @@ -0,0 +1,7 @@ +//go:build wasm + +package internal + +func NewOutputInterceptor() OutputInterceptor { + return &NoopOutputInterceptor{} +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go new file mode 100644 index 00000000..8c53fe0a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go @@ -0,0 +1,10 @@ +//go:build wasm + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2515b84a..2d2ea2fc 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -17,7 +17,7 @@ type specContext struct { context.Context *ProgressReporterManager - cancel context.CancelFunc + cancel context.CancelCauseFunc suite *Suite } @@ -30,7 +30,7 @@ Note that while SpecContext is used to enforce deadlines by Ginkgo it is not con This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. 
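The switch to context.WithCancelCause in this hunk is what lets interrupt and timeout reasons surface later (Gomega 1.31+ includes the cause in async-assertion failures). A standalone sketch of the standard-library behavior being relied on:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.New("node timeout occurred"))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // node timeout occurred
}
```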
*/ func NewSpecContext(suite *Suite) *specContext { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancelCause(context.Background()) sc := &specContext{ cancel: cancel, suite: suite, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index fe6e8288..a3c9e6bf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -79,7 +79,7 @@ func NewSuite() *Suite { func (suite *Suite) Clone() (*Suite, error) { if suite.phase != PhaseBuildTopLevel { - return nil, fmt.Errorf("cnanot clone suite after tree has been built") + return nil, fmt.Errorf("cannot clone suite after tree has been built") } return &Suite{ tree: &TreeNode{}, @@ -489,10 +489,15 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } - if specs.HasAnySpecsMarkedPending() && suite.config.FailOnPending { + if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") suite.report.SuiteSucceeded = false } + + if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set") + suite.report.SuiteSucceeded = false + } } if ranBeforeSuite { @@ -594,8 +599,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func(SpecContext) { - nodes[i].ReportEachBody(report) + nodes[i].Body = func(ctx SpecContext) { + nodes[i].ReportEachBody(ctx, report) } state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) @@ -762,7 +767,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { report = report.Add(aggregatedReport) } - node.Body = func(SpecContext) { node.ReportSuiteBody(report) } + node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() @@ -840,7 +845,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { - //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) timeoutInPlay = "node" @@ -858,7 +863,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } sc := NewSpecContext(suite) - defer sc.cancel() + defer sc.cancel(fmt.Errorf("spec has finished")) suite.selectiveLock.Lock() suite.currentSpecContext = sc @@ -918,9 +923,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ if outcomeFromRun != types.SpecStatePassed { additionalFailure := types.AdditionalFailure{ State: outcomeFromRun, - Failure: failure, //we make a copy - this will include all the configuration set up above... 
+ Failure: failure, // we make a copy - this will include all the configuration set up above... } - //...and then we update the failure with the details from failureFromRun + // ...and then we update the failure with the details from failureFromRun additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation additionalFailure.Failure.ProgressReport = types.ProgressReport{} if outcome == types.SpecStateTimedout { @@ -958,8 +963,8 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where // the spec is actually stuck - sc.cancel() - //and now we wait for the grace period + sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay)) + // and now we wait for the grace period gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() @@ -985,7 +990,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } progressReport = progressReport.WithoutOtherGoroutines() - sc.cancel() + sc.cancel(fmt.Errorf(interruptStatus.Message())) if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { if interruptStatus.ShouldIncludeProgressReport() { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be75..48073048 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,10 +182,31 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel + //should we completely omit this spec? 
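The reporter and suite options introduced across these hunks (GithubOutput, SilenceSkips, ForceNewlines, FailOnEmpty) can also be set programmatically instead of via CLI flags. A minimal bootstrap sketch, assuming a suite named "Books":

```go
package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)

	suiteConfig, reporterConfig := GinkgoConfiguration()
	suiteConfig.FailOnEmpty = true     // mirrors --fail-on-empty
	reporterConfig.GithubOutput = true // mirrors --github-output
	reporterConfig.SilenceSkips = true // mirrors --silence-skips

	RunSpecs(t, "Books Suite", suiteConfig, reporterConfig)
}
```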
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips { + return + } + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { header = fmt.Sprintf("[%s]", report.LeafNodeType) @@ -262,9 +283,12 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } - // If we have no content to show, jsut emit the header and return + // If we have no content to show, just emit the header and return if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) + if r.conf.ForceNewlines { + r.emit("\n") + } return } @@ -283,26 +307,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +426,15 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 81604220..562e0f62 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,6 +15,7 @@ import ( "fmt" "os" "path" + "regexp" "strings" "github.com/onsi/ginkgo/v2/config" @@ -104,6 +105,8 
@@ type JUnitProperty struct { Value string `xml:"value,attr"` } +var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) + type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` @@ -113,6 +116,8 @@ type JUnitTestCase struct { Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 `xml:"time,attr"` + // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. + Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted @@ -172,6 +177,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, + {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, @@ -195,6 +201,12 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + owner := "" + for _, label := range labels { + if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { + owner = matches[1] + } + } name = strings.TrimSpace(name) test := JUnitTestCase{ @@ -202,6 +214,7 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), + Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) @@ -312,6 +325,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index f33786a2..aa1a3517 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) { /* ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that -receives a SpecReport. They are called before the spec starts. +receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts. + +Example: + + ReportBeforeEach(func(report SpecReport) { // process report }) + ReportBeforeEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. 
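With the reporting-node signatures loosened in this hunk, a reporting node can opt into a SpecContext plus NodeTimeout so a slow external sink cannot hang the suite. A hedged sketch that only writes to the GinkgoWriter:

```go
package books_test

import (
	"fmt"
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = ReportAfterEach(func(ctx SpecContext, report SpecReport) {
	// ctx is cancelled if the node exceeds its NodeTimeout, so a real
	// implementation could hand it to an HTTP client or database call.
	fmt.Fprintf(GinkgoWriter, "%s | %s (%s)\n", report.State, report.FullText(), report.RunTime)
}, NodeTimeout(30*time.Second))
```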
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { +func ReportBeforeEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that -receives a SpecReport. They are called after the spec has completed and receive the final report for the spec. +ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. +ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior. +They are called after the spec has completed and receive the final report for the spec. + +Example: + + ReportAfterEach(func(report SpecReport) { // process report }) + ReportAfterEach(func(ctx SpecContext, report SpecReport) { + // process report + }), NodeTimeout(1 * time.Minute)) You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { +func ReportAfterEach(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) @@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { } /* -ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function +that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportBeforeSuite(func(r Report) { // process report }) + ReportBeforeSuite(func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) @@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportBeforeSuite(body func(Report), args ...interface{}) bool { +func ReportBeforeSuite(body any, args ...any) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) 
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) } /* -ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. +ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion, +and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior. + +Example Usage: + + ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report }) + ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) { + // process report + }, NodeTimeout(1 * time.Minute)) They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite. -ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) +ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes @@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports + +You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes */ -func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { +func ReportAfterSuite(text string, body any, args ...interface{}) bool { combinedArgs := []interface{}{body} combinedArgs = append(combinedArgs, args...) return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index ac9b7abb..c7de7a8b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -46,7 +46,7 @@ And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-s */ func DescribeTable(description string, args ...interface{}) bool { GinkgoHelper() - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -56,7 +56,7 @@ You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. func FDescribeTable(description string, args ...interface{}) bool { GinkgoHelper() args = append(args, internal.Focus) - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -66,7 +66,7 @@ You can mark a table as pending with `PDescribeTable`. This is equivalent to `P func PDescribeTable(description string, args ...interface{}) bool { GinkgoHelper() args = append(args, internal.Pending) - generateTable(description, args...) + generateTable(description, false, args...) return true } @@ -75,6 +75,71 @@ You can mark a table as pending with `XDescribeTable`. This is equivalent to `X */ var XDescribeTable = PDescribeTable +/* +DescribeTableSubtree describes a table-driven spec that generates a set of tests for each entry. 
+ +For example: + + DescribeTableSubtree("a subtree table", + func(url string, code int, message string) { + var resp *http.Response + BeforeEach(func() { + var err error + resp, err = http.Get(url) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(resp.Body.Close) + }) + + It("should return the expected status code", func() { + Expect(resp.StatusCode).To(Equal(code)) + }) + + It("should return the expected message", func() { + body, err := ioutil.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(body)).To(Equal(message)) + }) + }, + Entry("default response", "example.com/response", http.StatusOK, "hello world"), + Entry("missing response", "example.com/missing", http.StatusNotFound, "wat?"), + ) + +Note that you **must** place define an It inside the body function. + +You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs +And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns +*/ +func DescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + generateTable(description, true, args...) + return true +} + +/* +You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`. +*/ +func FDescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + args = append(args, internal.Focus) + generateTable(description, true, args...) + return true +} + +/* +You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`. +*/ +func PDescribeTableSubtree(description string, args ...interface{}) bool { + GinkgoHelper() + args = append(args, internal.Pending) + generateTable(description, true, args...) + return true +} + +/* +You can mark a table as pending with `XDescribeTableSubtree`. This is equivalent to `XDescribe`. +*/ +var XDescribeTableSubtree = PDescribeTableSubtree + /* TableEntry represents an entry in a table test. You generally use the `Entry` constructor. */ @@ -131,14 +196,14 @@ var XEntry = PEntry var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() -func generateTable(description string, args ...interface{}) { +func generateTable(description string, isSubtree bool, args ...interface{}) { GinkgoHelper() cl := types.NewCodeLocation(0) containerNodeArgs := []interface{}{cl} entries := []TableEntry{} - var itBody interface{} - var itBodyType reflect.Type + var internalBody interface{} + var internalBodyType reflect.Type var tableLevelEntryDescription interface{} tableLevelEntryDescription = func(args ...interface{}) string { @@ -166,11 +231,11 @@ func generateTable(description string, args ...interface{}) { case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): tableLevelEntryDescription = arg case t.Kind() == reflect.Func: - if itBody != nil { + if internalBody != nil { exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) } - itBody = arg - itBodyType = reflect.TypeOf(itBody) + internalBody = arg + internalBodyType = reflect.TypeOf(internalBody) default: containerNodeArgs = append(containerNodeArgs, arg) } @@ -200,39 +265,51 @@ func generateTable(description string, args ...interface{}) { err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) } - itNodeArgs := []interface{}{entry.codeLocation} - itNodeArgs = append(itNodeArgs, entry.decorations...) 
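One nuance of the new DescribeTableSubtree (enforced by ContextsCannotBeUsedInSubtreeTables further down in this diff): the body function itself must not take a SpecContext, so request the context inside the setup or subject nodes instead. A sketch under the assumption of a local server at localhost:8080:

```go
package api_test

import (
	"net/http"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = DescribeTableSubtree("querying the API",
	func(path string, expectedStatus int) { // no SpecContext parameter here
		var status int

		BeforeEach(func(ctx SpecContext) {
			// localhost:8080 is an assumed endpoint for illustration only.
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost:8080"+path, nil)
			Expect(err).NotTo(HaveOccurred())
			resp, err := http.DefaultClient.Do(req)
			Expect(err).NotTo(HaveOccurred())
			DeferCleanup(resp.Body.Close)
			status = resp.StatusCode
		}, NodeTimeout(10*time.Second))

		It("returns the expected status code", func() {
			Expect(status).To(Equal(expectedStatus))
		})
	},
	Entry("health endpoint", "/healthz", http.StatusOK),
	Entry("missing endpoint", "/nope", http.StatusNotFound),
)
```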
+ internalNodeArgs := []interface{}{entry.codeLocation} + internalNodeArgs = append(internalNodeArgs, entry.decorations...) hasContext := false - if itBodyType.NumIn() > 0. { - if itBodyType.In(0).Implements(specContextType) { + if internalBodyType.NumIn() > 0 { + if internalBodyType.In(0).Implements(specContextType) { hasContext = true - } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + } else if internalBodyType.In(0).Implements(contextType) { hasContext = true + if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) { + // we allow you to pass in a non-nil context + hasContext = false + } } } if err == nil { - err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) + err = validateParameters(internalBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) } if hasContext { - itNodeArgs = append(itNodeArgs, func(c SpecContext) { + internalNodeArgs = append(internalNodeArgs, func(c SpecContext) { if err != nil { panic(err) } - invokeFunction(itBody, append([]interface{}{c}, entry.parameters...)) + invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...)) }) + if isSubtree { + exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl)) + } } else { - itNodeArgs = append(itNodeArgs, func() { + internalNodeArgs = append(internalNodeArgs, func() { if err != nil { panic(err) } - invokeFunction(itBody, entry.parameters) + invokeFunction(internalBody, entry.parameters) }) } - pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) + internalNodeType := types.NodeTypeIt + if isSubtree { + internalNodeType = types.NodeTypeContainer + } + + pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...)) } }) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a..66463cf5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -25,6 +25,7 @@ type SuiteConfig struct { SkipFiles []string LabelFilter string FailOnPending bool + FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int @@ -89,6 +90,9 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool + SilenceSkips bool + ForceNewlines bool JSONReport string JUnitReport string @@ -264,7 +268,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -274,6 +278,8 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, + {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", + Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, @@ -331,6 +337,12 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, + {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", + Usage: "If set, default reporter will not print out skipped tests."}, + {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", + Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 4fbdc3e9..6bb72d00 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -505,6 +505,15 @@ func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, ac } } +func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { + return GinkgoError{ + Heading: "Contexts cannot be used in subtree tables", + Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. 
Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", + CodeLocation: cl, + DocLink: "table-specs", + } +} + /* Parallel Synchronization errors */ func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae87..de69f302 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index b0d3b651..7fdc8aa2 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -45,6 +45,83 @@ func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } +func labelSetFor(key string, labels []string) map[string]bool { + key = strings.ToLower(strings.TrimSpace(key)) + out := map[string]bool{} + for _, label := range labels { + components := strings.SplitN(label, ":", 2) + if len(components) < 2 { + continue + } + if key == strings.ToLower(strings.TrimSpace(components[0])) { + out[strings.ToLower(strings.TrimSpace(components[1]))] = true + } + } + + return out +} + +func isEmptyLabelSetAction(key string) LabelFilter { + return func(labels []string) bool { + return len(labelSetFor(key, labels)) == 0 
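The label-filter machinery in this hunk adds key:value label sets and the set operators containsAny, containsAll, consistsOf, isSubsetOf, and isEmpty. A sketch of how specs might be labelled to use them; the keys and values are invented for illustration:

```go
package storage_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("replication", Label("backend: postgres", "region: us-east"), func() {
	It("replays the WAL", Label("backend: mysql"), func() {
		// Selected by, for example:
		//   ginkgo --label-filter='backend: containsAny {postgres, mysql}'
		// Excluded by:
		//   ginkgo --label-filter='backend: isEmpty'
	})
})
```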
+ } +} + +func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if set[value] { + return true + } + } + return false + } +} + +func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { + return func(labels []string) bool { + set := labelSetFor(key, labels) + if len(set) != len(expectedValues) { + return false + } + for _, value := range expectedValues { + if !set[value] { + return false + } + } + return true + } +} + +func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { + expectedSet := map[string]bool{} + for _, value := range expectedValues { + expectedSet[value] = true + } + return func(labels []string) bool { + set := labelSetFor(key, labels) + for value := range set { + if !expectedSet[value] { + return false + } + } + return true + } +} + type lfToken uint const ( @@ -58,6 +135,9 @@ const ( lfTokenOr lfTokenRegexp lfTokenLabel + lfTokenSetKey + lfTokenSetOperation + lfTokenSetArgument lfTokenEOF ) @@ -71,6 +151,8 @@ func (l lfToken) Precedence() int { return 2 case lfTokenNot: return 3 + case lfTokenSetOperation: + return 4 } return -1 } @@ -93,6 +175,12 @@ func (l lfToken) String() string { return "/regexp/" case lfTokenLabel: return "label" + case lfTokenSetKey: + return "set_key" + case lfTokenSetOperation: + return "set_operation" + case lfTokenSetArgument: + return "set_argument" case lfTokenEOF: return "EOF" } @@ -148,6 +236,35 @@ func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil + case lfTokenSetOperation: + tokenSetOperation := strings.ToLower(tn.value) + if tokenSetOperation == "isempty" { + return isEmptyLabelSetAction(tn.leftNode.value), nil + } + if tn.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) + } + + rawValues := strings.Split(tn.rightNode.value, ",") + values := make([]string, len(rawValues)) + for i := range rawValues { + values[i] = strings.ToLower(strings.TrimSpace(rawValues[i])) + if strings.ContainsAny(values[i], "&|!,()/") { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) + } else if values[i] == "" { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") + } + } + switch tokenSetOperation { + case "containsany": + return containsAnyLabelSetAction(tn.leftNode.value, values), nil + case "containsall": + return containsAllLabelSetAction(tn.leftNode.value, values), nil + case "consistsof": + return consistsOfLabelSetAction(tn.leftNode.value, values), nil + case "issubsetof": + return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil + } } if tn.rightNode == nil { @@ -203,7 +320,17 @@ func (tn *treeNode) toString(indent int) string { return out } +var validSetOperations = map[string]string{ + "containsany": 
"containsAny", + "containsall": "containsAll", + "consistsof": "consistsOf", + "issubsetof": "isSubsetOf", + "isempty": "isEmpty", +} + func tokenize(input string) func() (*treeNode, error) { + lastToken := lfTokenInvalid + lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { @@ -233,6 +360,53 @@ func tokenize(input string) func() (*treeNode, error) { } node := &treeNode{location: i} + defer func() { + lastToken = node.token + lastValue = node.value + }() + + if lastToken == lfTokenSetKey { + //we should get a valid set operation next + value, n := consumeUntil(" )") + if validSetOperations[strings.ToLower(value)] == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) + } + i += n + node.token, node.value = lfTokenSetOperation, value + return node, nil + } + if lastToken == lfTokenSetOperation { + //we should get an argument next, if we aren't isempty + var arg = "" + origI := i + if runes[i] == '{' { + i += 1 + value, n := consumeUntil("}") + if i+n >= len(runes) { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") + } + i += n + 1 + arg = value + } else { + value, n := consumeUntil("&|!,()/") + i += n + arg = strings.TrimSpace(value) + } + if strings.ToLower(lastValue) == "isempty" && arg != "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) + } + if arg == "" && strings.ToLower(lastValue) != "isempty" { + if i < len(runes) && runes[i] == '/' { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") + } else { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) + } + } + // note that we sent an empty SetArgument token if we are isempty + node.token, node.value = lfTokenSetArgument, arg + return node, nil + } + switch runes[i] { case '&': if !peekIs('&') { @@ -264,8 +438,38 @@ func tokenize(input string) func() (*treeNode, error) { i += n + 1 node.token, node.value = lfTokenRegexp, value default: - value, n := consumeUntil("&|!,()/") + value, n := consumeUntil("&|!,()/:") i += n + value = strings.TrimSpace(value) + + //are we the beginning of a set operation? + if i < len(runes) && runes[i] == ':' { + if peekIs(' ') { + if value == "" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") + } + i += 1 + //we are the beginning of a set operation + node.token, node.value = lfTokenSetKey, value + return node, nil + } + additionalValue, n := consumeUntil("&|!,()/") + additionalValue = strings.TrimSpace(additionalValue) + if additionalValue == ":" { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") + } + i += n + value += additionalValue + } + + valueToCheckForSetOperation := strings.ToLower(value) + for setOperation := range validSetOperations { + idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) + if idx > 0 { + return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. 
Did you forget the ':'?", validSetOperations[setOperation])) + } + } + node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil @@ -307,7 +511,7 @@ LOOP: switch node.token { case lfTokenEOF: break LOOP - case lfTokenLabel, lfTokenRegexp: + case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.") } @@ -326,6 +530,18 @@ LOOP: node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node + case lfTokenSetOperation: + if current.rightNode == nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) + } + node.setLeftNode(current.rightNode) + current.setRightNode(node) + current = node + case lfTokenSetArgument: + if current.rightNode != nil { + return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) + } + current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { @@ -354,5 +570,14 @@ func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } + if out[0] == ':' { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + if strings.Contains(out, ":") { + components := strings.SplitN(out, ":", 2) + if len(components) < 2 || components[1] == "" { + return "", GinkgoErrors.InvalidLabel(label, cl) + } + } return out, nil } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index a4a1524b..acab0349 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.13.2" +const VERSION = "2.19.0" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 4fc45f29..62af14ad 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,69 @@ +## 1.33.1 + +### Fixes +- fix confusing eventually docs [3a66379] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a] + +## 1.33.0 + +### Features + +`Receive` not accepts `Receive(, MATCHER>)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer. + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb] +- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596] + +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. 
+ +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + +## 1.31.1 + +### Fixes +- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999] +- Update test in case keeping msg is desired [ad1a367] + +### Maintenance +- Show how to import the format sub package [24e958d] +- tidy up go.sum [26661b8] +- bump dependencies [bde8f7a] + +## 1.31.0 + +### Features +- Async assertions include context cancellation cause if present [121c37f] + +### Maintenance +- Bump minimum go version [dee1e3c] +- docs: fix typo in example usage "occured" -> "occurred" [49005fe] +- Bump actions/setup-go from 4 to 5 (#714) [f1c8757] +- Bump github/codeql-action from 2 to 3 (#715) [9836e76] +- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0] +- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc] +- docs: fix `HaveExactElement` typo (#712) [a672c86] + +## 1.30.0 + +### Features +- BeTrueBecause and BeFalseBecause allow for better failure messages [4da4c7f] + +### Maintenance +- Bump actions/checkout from 3 to 4 (#694) [6ca6e97] +- doc: fix type on gleak go doc [f1b8343] + ## 1.29.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index ba082146..9697d513 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.29.0" +const GOMEGA_VERSION = "1.33.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -372,11 +372,11 @@ You can ensure that you get a number of consecutive successful tries before succ Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: - Eventually(..., "1s", "2s", ctx).Should(...) + Eventually(..., "10s", "2s", ctx).Should(...) is equivalent to - Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) + Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) 
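Together with the ginkgo SpecContext change earlier in this diff, the async-assertion update just below means a cancelled context now reports why it was cancelled. A deliberately failing sketch showing the shape of the message (the cause text is invented):

```go
package docs_test

import (
	"context"
	"errors"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = It("reports the cancellation cause", func() {
	ctx, cancel := context.WithCancelCause(context.Background())
	go func() {
		time.Sleep(50 * time.Millisecond)
		cancel(errors.New("upstream dependency went away"))
	}()

	// This assertion can never succeed; once the context is cancelled the
	// failure message reads:
	//   Context was cancelled (cause: upstream dependency went away)
	Eventually(func() bool { return false }).WithContext(ctx).Should(BeTrue())
})
```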
*/ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 1188b0bc..cde9e2ec 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -553,7 +553,12 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Unlock() } case <-contextDone: - fail("Context was cancelled") + err := context.Cause(assertion.ctx) + if err != nil && err != context.Canceled { + fail(fmt.Sprintf("Context was cancelled (cause: %s)", err)) + } else { + fail("Context was cancelled") + } return false case <-timeout: if assertion.asyncType == AsyncAssertionTypeEventually { diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index cd3f431d..7ef27dc9 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -1,6 +1,7 @@ package gomega import ( + "fmt" "time" "github.com/google/go-cmp/cmp" @@ -52,15 +53,31 @@ func BeNil() types.GomegaMatcher { } // BeTrue succeeds if actual is true +// +// In general, it's better to use `BeTrueBecause(reason)` to provide a more useful error message if a true check fails. func BeTrue() types.GomegaMatcher { return &matchers.BeTrueMatcher{} } // BeFalse succeeds if actual is false +// +// In general, it's better to use `BeFalseBecause(reason)` to provide a more useful error message if a false check fails. func BeFalse() types.GomegaMatcher { return &matchers.BeFalseMatcher{} } +// BeTrueBecause succeeds if actual is true and displays the provided reason if it is false +// fmt.Sprintf is used to render the reason +func BeTrueBecause(format string, args ...any) types.GomegaMatcher { + return &matchers.BeTrueMatcher{Reason: fmt.Sprintf(format, args...)} +} + +// BeFalseBecause succeeds if actual is false and displays the provided reason if it is true. +// fmt.Sprintf is used to render the reason +func BeFalseBecause(format string, args ...any) types.GomegaMatcher { + return &matchers.BeFalseMatcher{Reason: fmt.Sprintf(format, args...)} +} + // HaveOccurred succeeds if actual is a non-nil error // The typical Go error checking pattern looks like: // @@ -177,20 +194,21 @@ func BeClosed() types.GomegaMatcher { // // will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
// -// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing)) // Expect(myThing.Sprocket).Should(Equal("foo")) // Expect(myThing.IsValid()).Should(BeTrue()) +// +// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, +// you can pass a pointer to a variable of the approriate type first, and second a matcher: +// +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) func Receive(args ...interface{}) types.GomegaMatcher { - var arg interface{} - if len(args) > 0 { - arg = args[0] - } - return &matchers.ReceiveMatcher{ - Arg: arg, + Args: args, } } @@ -377,7 +395,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } -// HaveExactElemets succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 8ab4bb91..4e389785 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -41,9 +41,9 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m } func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { - return cmp.Diff(matcher.Expected, actual, matcher.Options) + return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to equal", matcher.Expected) + return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go index e326c015..8ee2b1c5 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -9,6 +9,7 @@ import ( ) type BeFalseMatcher struct { + Reason string } func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { @@ -20,9 +21,17 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be false") + if matcher.Reason == "" { + return format.Message(actual, "to be false") + } else { + return matcher.Reason + } } func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return 
format.Message(actual, "not to be false") + if matcher.Reason == "" { + return format.Message(actual, "not to be false") + } else { + return fmt.Sprintf(`Expected not false but got false\nNegation of "%s" failed`, matcher.Reason) + } } diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go index 60bc1e3f..3576aac8 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -9,6 +9,7 @@ import ( ) type BeTrueMatcher struct { + Reason string } func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { @@ -20,9 +21,17 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error } func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be true") + if matcher.Reason == "" { + return format.Message(actual, "to be true") + } else { + return matcher.Reason + } } func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be true") + if matcher.Reason == "" { + return format.Message(actual, "not to be true") + } else { + return fmt.Sprintf(`Expected not true but got true\nNegation of "%s" failed`, matcher.Reason) + } } diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 1936a2ba..948164ea 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -3,6 +3,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -10,7 +11,7 @@ import ( ) type ReceiveMatcher struct { - Arg interface{} + Args []interface{} receivedValue reflect.Value channelClosed bool } @@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - - if matcher.Arg != nil { - subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + var resultReference interface{} + + // Valid arg formats are as follows, always with optional POINTER before + // optional MATCHER: + // - Receive() + // - Receive(POINTER) + // - Receive(MATCHER) + // - Receive(POINTER, MATCHER) + args := matcher.Args + if len(args) > 0 { + arg := args[0] + _, isSubMatcher := arg.(omegaMatcher) + if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr { + // Consume optional POINTER arg first, if it ain't no matcher ;) + resultReference = arg + args = args[1:] + } + } + if len(args) > 0 { + arg := args[0] + subMatcher, hasSubMatcher = arg.(omegaMatcher) if !hasSubMatcher { - argType := reflect.TypeOf(matcher.Arg) - if argType.Kind() != reflect.Ptr { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } + // At this point we assume the dev user wanted to assign a received + // value, so [POINTER,]MATCHER. + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1)) } + // Consume optional MATCHER arg. + args = args[1:] + } + if len(args) > 0 { + // If there are still args present, reject all. 
+ return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher") } winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ @@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } if hasSubMatcher { - if didReceive { - matcher.receivedValue = value - return subMatcher.Match(matcher.receivedValue.Interface()) + if !didReceive { + return false, nil } - return false, nil + matcher.receivedValue = value + if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match { + return match, err + } + // if we received a match, then fall through in order to handle an + // optional assignment of the received value to the specified reference. } if didReceive { - if matcher.Arg != nil { - outValue := reflect.ValueOf(matcher.Arg) + if resultReference != nil { + outValue := reflect.ValueOf(resultReference) if value.Type().AssignableTo(outValue.Elem().Type()) { outValue.Elem().Set(value) @@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro outValue.Elem().Set(value.Elem()) return true, nil } else { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1)) } } @@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { @@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin } func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { diff --git a/vendor/github.com/x448/float16/.travis.yml b/vendor/github.com/x448/float16/.travis.yml new file mode 100644 index 00000000..8902bdaa --- /dev/null +++ b/vendor/github.com/x448/float16/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.11.x + +env: + - GO111MODULE=on + +script: + - go test -short -coverprofile=coverage.txt -covermode=count ./... 
+ +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/x448/float16/LICENSE b/vendor/github.com/x448/float16/LICENSE new file mode 100644 index 00000000..bf6e3578 --- /dev/null +++ b/vendor/github.com/x448/float16/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/x448/float16/README.md b/vendor/github.com/x448/float16/README.md new file mode 100644 index 00000000..b524b813 --- /dev/null +++ b/vendor/github.com/x448/float16/README.md @@ -0,0 +1,133 @@ +# Float16 (Binary16) in Go/Golang +[![Build Status](https://travis-ci.org/x448/float16.svg?branch=master)](https://travis-ci.org/x448/float16) +[![codecov](https://codecov.io/gh/x448/float16/branch/master/graph/badge.svg?v=4)](https://codecov.io/gh/x448/float16) +[![Go Report Card](https://goreportcard.com/badge/github.com/x448/float16)](https://goreportcard.com/report/github.com/x448/float16) +[![Release](https://img.shields.io/github/release/x448/float16.svg?style=flat-square)](https://github.com/x448/float16/releases) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/x448/float16/master/LICENSE) + +`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16. + +IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result. + +All possible 4+ billion floating-point conversions with this library are verified to be correct. + +Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library. + +## Features +Current features include: + +* float16 to float32 conversions use lossless conversion. +* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven". +* conversions using pure Go take about 2.65 ns/op on a desktop amd64. +* unit tests provide 100% code coverage and check all possible 4+ billion conversions. +* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc. +* all functions in this library use zero allocs except String(). 
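The feature list above claims lossless float16→float32 conversion and IEEE round-to-nearest-even in the other direction; the short round-trip sketch below exercises those claims using only the API shown in this README (the chosen constants are illustrative).

package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	// 0.5 is exactly representable in binary16, so it survives the round trip.
	exact := float32(0.5)
	fmt.Println(float16.PrecisionFromfloat32(exact) == float16.PrecisionExact) // true
	fmt.Println(float16.Fromfloat32(exact).Float32() == exact)                 // true

	// math.Pi needs more than 10 significand bits, so float32 -> float16 rounds.
	pi := float32(math.Pi)
	fmt.Println(float16.PrecisionFromfloat32(pi) == float16.PrecisionExact) // false
	fmt.Println(float16.Fromfloat32(pi).Float32() == pi)                    // false

	// float16 -> float32 is lossless: converting back reproduces the same bits.
	h := float16.Fromfloat32(pi)
	fmt.Println(float16.Fromfloat32(h.Float32()).Bits() == h.Bits()) // true
}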
+ +## Status +This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published. + +Current status: + +* core API is done and breaking API changes are unlikely. +* 100% of unit tests pass: + * short mode (`go test -short`) tests around 65765 conversions in 0.005s. + * normal mode (`go test`) tests all possible 4+ billion conversions in about 95s. +* 100% code coverage with both short mode and normal mode. +* tested on amd64 but it should work on all little-endian platforms supported by Go. + +Roadmap: + +* add functions for fast batch conversions leveraging SIMD when supported by hardware. +* speed up unit test when verifying all possible 4+ billion conversions. +* test on additional platforms. + +## Float16 to Float32 Conversion +Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct. + +Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions. + +## Float32 to Float16 Conversion +Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct. + +Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32(). + +Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage. + +## Usage +Install with `go get github.com/x448/float16`. +``` +// Convert float32 to float16 +pi := float32(math.Pi) +pi16 := float16.Fromfloat32(pi) + +// Convert float16 to float32 +pi32 := pi16.Float32() + +// PrecisionFromfloat32() is faster than the overhead of calling a function. +// This example only converts if there's no data loss and input is not a subnormal. +if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact { + pi16 := float16.Fromfloat32(pi) +} +``` + +## Float16 Type and API +Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods. +``` +package float16 // import "github.com/x448/float16" + +// Exported types and consts +type Float16 uint16 +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +// Exported functions +Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding + with identical results to AMD and Intel F16C hardware. NaN inputs + are converted with quiet bit always set on, to be like F16C. + +FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit. + // The "ps" suffix means "preserve signaling". + // Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN. + +Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.) 
+NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number +Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign + +PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow + // (inline and < 1 ns/op) +// Exported methods +(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion +(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f +(f Float16) IsNaN() bool // true if f is not-a-number (NaN) +(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN) +(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf) +(f Float16) IsFinite() bool // true if f is not infinite or NaN +(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN. +(f Float16) Signbit() bool // true if f is negative or negative zero +(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface +``` +See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info. + +## Benchmarks +Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value. + +``` +All functions have zero allocations except float16.String(). + +FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16 +ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32 +Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16 + +PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc. +``` + +## System Requirements +* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions. +* Tested on amd64 but it should also work on all little-endian platforms supported by Go. + +## Special Thanks +Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16. + +## License +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker + +Licensed under [MIT License](LICENSE) diff --git a/vendor/github.com/x448/float16/float16.go b/vendor/github.com/x448/float16/float16.go new file mode 100644 index 00000000..1a0e6dad --- /dev/null +++ b/vendor/github.com/x448/float16/float16.go @@ -0,0 +1,302 @@ +// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker +// +// Special thanks to Kathryn Long for her Rust implementation +// of float16 at github.com/starkat99/half-rs (MIT license) + +package float16 + +import ( + "math" + "strconv" +) + +// Float16 represents IEEE 754 half-precision floating-point numbers (binary16). +type Float16 uint16 + +// Precision indicates whether the conversion to Float16 is +// exact, subnormal without dropped bits, inexact, underflow, or overflow. +type Precision int + +const ( + + // PrecisionExact is for non-subnormals that don't drop bits during conversion. + // All of these can round-trip. Should always convert to float16. + PrecisionExact Precision = iota + + // PrecisionUnknown is for subnormals that don't drop bits during conversion but + // not all of these can round-trip so precision is unknown without more effort. + // Only 2046 of these can round-trip and the rest cannot round-trip. + PrecisionUnknown + + // PrecisionInexact is for dropped significand bits and cannot round-trip. + // Some of these are subnormals. Cannot round-trip float32->float16->float32. 
+ PrecisionInexact + + // PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32. + PrecisionUnderflow + + // PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32. + PrecisionOverflow +) + +// PrecisionFromfloat32 returns Precision without performing +// the conversion. Conversions from both Infinity and NaN +// values will always report PrecisionExact even if NaN payload +// or NaN-Quiet-Bit is lost. This function is kept simple to +// allow inlining and run < 0.5 ns/op, to serve as a fast filter. +func PrecisionFromfloat32(f32 float32) Precision { + u32 := math.Float32bits(f32) + + if u32 == 0 || u32 == 0x80000000 { + // +- zero will always be exact conversion + return PrecisionExact + } + + const COEFMASK uint32 = 0x7fffff // 23 least significant bits + const EXPSHIFT uint32 = 23 + const EXPBIAS uint32 = 127 + const EXPMASK uint32 = uint32(0xff) << EXPSHIFT + const DROPMASK uint32 = COEFMASK >> 10 + + exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS) + coef := u32 & COEFMASK + + if exp == 128 { + // +- infinity or NaN + // apps may want to do extra checks for NaN separately + return PrecisionExact + } + + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format says, + // "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24" + if exp < -24 { + return PrecisionUnderflow + } + if exp > 15 { + return PrecisionOverflow + } + if (coef & DROPMASK) != uint32(0) { + // these include subnormals and non-subnormals that dropped bits + return PrecisionInexact + } + + if exp < -14 { + // Subnormals. Caller may want to test these further. + // There are 2046 subnormals that can successfully round-trip f32->f16->f32 + // and 20 of those 2046 have 32-bit input coef == 0. + // RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value" + // so some protocols and libraries will choose to handle subnormals differently + // when deciding to encode them to CBOR float32 vs float16. + return PrecisionUnknown + } + + return PrecisionExact +} + +// Frombits returns the float16 number corresponding to the IEEE 754 binary16 +// representation u16, with the sign bit of u16 and the result in the same bit +// position. Frombits(Bits(x)) == x. +func Frombits(u16 uint16) Float16 { + return Float16(u16) +} + +// Fromfloat32 returns a Float16 value converted from f32. Conversion uses +// IEEE default rounding (nearest int, with ties to even). +func Fromfloat32(f32 float32) Float16 { + return Float16(f32bitsToF16bits(math.Float32bits(f32))) +} + +// ErrInvalidNaNValue indicates a NaN was not received. +const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN") + +type float16Error string + +func (e float16Error) Error() string { return string(e) } + +// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both +// signaling and payload. Unlike Fromfloat32(), which can only return +// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN. +// If the result is infinity (sNaN with empty payload), then the +// lowest bit of payload is set to make the result a NaN. +// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN. +// This function was kept simple to be able to inline. 
+func FromNaN32ps(nan float32) (Float16, error) { + const SNAN = Float16(uint16(0x7c01)) // signalling NaN + + u32 := math.Float32bits(nan) + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if (exp != 0x7f800000) || (coef == 0) { + return SNAN, ErrInvalidNaNValue + } + + u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13)) + + if (u16 & 0x03ff) == 0 { + // result became infinity, make it NaN by setting lowest bit in payload + u16 = u16 | 0x0001 + } + + return Float16(u16), nil +} + +// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN). +// Returned NaN value 0x7e01 has all exponent bits = 1 with the +// first and last bits = 1 in the significand. This is consistent +// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00. +func NaN() Float16 { + return Float16(0x7e01) +} + +// Inf returns a Float16 with an infinity value with the specified sign. +// A sign >= returns positive infinity. +// A sign < 0 returns negative infinity. +func Inf(sign int) Float16 { + if sign >= 0 { + return Float16(0x7c00) + } + return Float16(0x8000 | 0x7c00) +} + +// Float32 returns a float32 converted from f (Float16). +// This is a lossless conversion. +func (f Float16) Float32() float32 { + u32 := f16bitsToF32bits(uint16(f)) + return math.Float32frombits(u32) +} + +// Bits returns the IEEE 754 binary16 representation of f, with the sign bit +// of f and the result in the same bit position. Bits(Frombits(x)) == x. +func (f Float16) Bits() uint16 { + return uint16(f) +} + +// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value. +func (f Float16) IsNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) +} + +// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16 +// “not-a-number” value. +func (f Float16) IsQuietNaN() bool { + return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0) +} + +// IsInf reports whether f is an infinity (inf). +// A sign > 0 reports whether f is positive inf. +// A sign < 0 reports whether f is negative inf. +// A sign == 0 reports whether f is either inf. +func (f Float16) IsInf(sign int) bool { + return ((f == 0x7c00) && sign >= 0) || + (f == 0xfc00 && sign <= 0) +} + +// IsFinite returns true if f is neither infinite nor NaN. +func (f Float16) IsFinite() bool { + return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00) +} + +// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN. +func (f Float16) IsNormal() bool { + exp := uint16(f) & uint16(0x7c00) + return (exp != uint16(0x7c00)) && (exp != 0) +} + +// Signbit reports whether f is negative or negative zero. +func (f Float16) Signbit() bool { + return (uint16(f) & uint16(0x8000)) != 0 +} + +// String satisfies the fmt.Stringer interface. +func (f Float16) String() string { + return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32) +} + +// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16. +func f16bitsToF32bits(in uint16) uint32 { + // All 65536 conversions with this were confirmed to be correct + // by Montgomery Edwards⁴⁴⁸ (github.com/x448). 
+ + sign := uint32(in&0x8000) << 16 // sign for 32-bit + exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit + coef := uint32(in&0x03ff) << 13 // significand for 32-bit + + if exp == 0x1f { + if coef == 0 { + // infinity + return sign | 0x7f800000 | coef + } + // NaN + return sign | 0x7fc00000 | coef + } + + if exp == 0 { + if coef == 0 { + // zero + return sign + } + + // normalize subnormal numbers + exp++ + for coef&0x7f800000 == 0 { + coef <<= 1 + exp-- + } + coef &= 0x007fffff + } + + return sign | ((exp + (0x7f - 0xf)) << 23) | coef +} + +// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32. +// Conversion rounds to nearest integer with ties to even. +func f32bitsToF16bits(u32 uint32) uint16 { + // Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448). + // All 4294967296 conversions with this were confirmed to be correct by x448. + // Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license. + + sign := u32 & 0x80000000 + exp := u32 & 0x7f800000 + coef := u32 & 0x007fffff + + if exp == 0x7f800000 { + // NaN or Infinity + nanBit := uint32(0) + if coef != 0 { + nanBit = uint32(0x0200) + } + return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13)) + } + + halfSign := sign >> 16 + + unbiasedExp := int32(exp>>23) - 127 + halfExp := unbiasedExp + 15 + + if halfExp >= 0x1f { + return uint16(halfSign | uint32(0x7c00)) + } + + if halfExp <= 0 { + if 14-halfExp > 24 { + return uint16(halfSign) + } + coef := coef | uint32(0x00800000) + halfCoef := coef >> uint32(14-halfExp) + roundBit := uint32(1) << uint32(13-halfExp) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + halfCoef++ + } + return uint16(halfSign | halfCoef) + } + + uHalfExp := uint32(halfExp) << 10 + halfCoef := coef >> 13 + roundBit := uint32(0x00001000) + if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 { + return uint16((halfSign | uHalfExp | halfCoef) + 1) + } + return uint16(halfSign | uHalfExp | halfCoef) +} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index d2ca5dee..b3c1699b 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -19,15 +19,14 @@ #define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ MULHDU r0, h0, t1; \ + MULLD r0, h1, t4; \ MULHDU r0, h1, t5; \ ADDC t4, t1, t1; \ MULLD r0, h2, t2; \ - ADDZE t5; \ MULHDU r1, h0, t4; \ MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ + ADDE t5, t2, t2; \ ADDC h0, t1, t1; \ MULLD h2, r1, t3; \ ADDZE t4, h0; \ @@ -37,13 +36,11 @@ ADDE t5, t3, t3; \ ADDC h0, t2, t2; \ MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ + RLDICL $0, t2, $62, h2; \ + AND t2, t4, h0; \ ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ + ADDE t3, t1, h1; \ SLD $62, t3, t4; \ SRD $2, t2; \ ADDZE h2; \ @@ -75,6 +72,7 @@ TEXT ·update(SB), $0-32 loop: POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + PCALIGN $16 multiply: POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) ADD $-16, R5 diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 2466ae3d..3a7e5ab1 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -104,7 +104,7 @@ tokenization, and tokenization and tree construction stages of the WHATWG HTML parsing specification respectively. 
While the tokenizer parses and normalizes individual HTML tokens, only the parser constructs the DOM tree from the tokenized HTML, as described in the tree construction stage of the -specification, dynamically modifying or extending the docuemnt's DOM tree. +specification, dynamically modifying or extending the document's DOM tree. If your use case requires semantically well-formed HTML documents, as defined by the WHATWG specification, the parser should be used rather than the tokenizer. diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e85..9b4de940 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e2b298d8..105c3b27 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1564,6 +1567,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,8 +1580,38 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. 
+ if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1594,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df281..003e649f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" "fmt" "io" @@ -26,6 +27,7 @@ import ( "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) @@ -210,12 +212,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -383,3 +379,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984f..3b9f06b9 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. 
+ // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c640..6c349f3e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -153,6 +154,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState + + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface +} + +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() + } +} + +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() + } + return time.Now() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} } func (s *Server) initialConnRecvWindowSize() int32 { @@ -399,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() @@ -425,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -434,7 +475,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. 
- if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -598,8 +639,8 @@ type serverConn struct { inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -648,12 +689,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -731,11 +772,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -814,8 +851,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -846,6 +884,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -924,14 +963,14 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + if sc.srv.IdleTimeout > 0 { + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() loopNum := 0 @@ -1060,10 +1099,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? 
defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1428,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1481,6 +1520,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1637,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1659,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -2017,9 +2062,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2038,7 +2083,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,8 +2161,8 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + if sc.hs.WriteTimeout > 0 { + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2341,6 +2386,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2637,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. 
- date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2759,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2775,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2785,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2801,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 00000000..0b1c17b8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index df578b86..98a49c6b 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -147,6 +147,12 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. 
// Note that a ping response will is considered a received frame, so if @@ -178,6 +184,46 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + *transportTestHooks +} + +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() + } +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) + } + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) + } + return timeTimer{time.AfterFunc(d, f)} +} + +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -302,7 +348,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer + idleTimer timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -446,6 +492,7 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() @@ -537,15 +584,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. 
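The IdleConnTimeout field introduced above sits alongside the existing ReadIdleTimeout/PingTimeout health-check knobs; a minimal configuration sketch follows, with purely illustrative durations.

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func newH2Client() *http.Client {
	t := &http2.Transport{
		// Close pooled connections that have been idle for this long.
		IdleConnTimeout: 90 * time.Second,
		// Send a health-check PING if no frame is received for this long.
		ReadIdleTimeout: 30 * time.Second,
		// Fail the health check if the PING goes unanswered for this long.
		PingTimeout: 15 * time.Second,
	}
	return &http.Client{Transport: t}
}

func main() {
	client := newH2Client()
	_ = client // construction only; real requests need a reachable HTTPS endpoint
}

Using http2.Transport directly like this assumes HTTPS endpoints; a plain-text h2c client would additionally need AllowHTTP and a custom dialer, which this sketch does not cover.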
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +611,13 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + tm := t.newTimer(d) select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +696,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -751,9 +792,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) + c = cc.tconn } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,6 +860,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return nil, cc.werr } + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + go cc.readLoop() return cc, nil } @@ -826,7 +874,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -861,7 +909,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClentConnGotGoAway indicates that + // the request should be retried on a new connection. 
cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -1057,6 +1118,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1215,6 +1277,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,7 +1295,28 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { select { @@ -1322,8 +1409,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1334,7 +1422,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. -func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1372,24 +1460,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1452,9 +1524,9 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1875,6 +1947,22 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + var errNilRequestURL = errors.New("http2: Request.URI is nil") // requires cc.wmu be held. @@ -1912,19 +2000,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2165,6 +2248,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. 
func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2266,10 +2350,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2767,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2911,6 +2994,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() @@ -2955,24 +3047,26 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) + var pingError error + errc := make(chan struct{}) go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } }() select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3141,9 +3235,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c66..f6783339 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index 573fe79e..d7d4b8b6 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -137,9 +137,7 @@ func (p *PerHost) AddNetwork(net *net.IPNet) { // AddZone specifies a DNS suffix that will use the bypass proxy. A zone of // "example.com" matches "example.com" and all of its subdomains. 
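The transport.go hunks above route the read-inactivity health check through the new timer abstraction (afterFunc(readIdleTimeout, cc.healthCheck)) and make idleConnTimeout() prefer the HTTP/2 Transport's own IdleConnTimeout before falling back to the wrapped net/http transport. A minimal configuration sketch for these knobs, illustrative only and not part of the patch; PingTimeout is assumed to be the usual companion field, and the durations are arbitrary:

```go
package clientcfg

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

// newHTTP2Client builds a client whose connections are health-checked and
// reaped according to the timers touched in the hunks above.
func newHTTP2Client() *http.Client {
	t := &http2.Transport{
		// Send a PING frame if no frame arrives for this long; this is the
		// value read as cc.t.ReadIdleTimeout in readLoop above.
		ReadIdleTimeout: 30 * time.Second,
		// How long to wait for the PING response before closing the
		// connection (assumed field name, not shown in this hunk).
		PingTimeout: 15 * time.Second,
		// Now honored directly on the http2 Transport before falling back to
		// any wrapped http.Transport's IdleConnTimeout.
		IdleConnTimeout: 90 * time.Second,
	}
	return &http.Client{Transport: t}
}
```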
func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } + zone = strings.TrimSuffix(zone, ".") if !strings.HasPrefix(zone, ".") { zone = "." + zone } @@ -148,8 +146,6 @@ func (p *PerHost) AddZone(zone string) { // AddHost specifies a host name that will use the bypass proxy. func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } + host = strings.TrimSuffix(host, ".") p.bypassHosts = append(p.bypassHosts, host) } diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go index 69a4ac7e..1e64157f 100644 --- a/vendor/golang.org/x/net/websocket/client.go +++ b/vendor/golang.org/x/net/websocket/client.go @@ -6,10 +6,12 @@ package websocket import ( "bufio" + "context" "io" "net" "net/http" "net/url" + "time" ) // DialError is an error that occurs while dialling a websocket server. @@ -79,28 +81,59 @@ func parseAuthority(location *url.URL) string { // DialConfig opens a new client connection to a WebSocket with a config. func DialConfig(config *Config) (ws *Conn, err error) { - var client net.Conn + return config.DialContext(context.Background()) +} + +// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation. +func (config *Config) DialContext(ctx context.Context) (*Conn, error) { if config.Location == nil { return nil, &DialError{config, ErrBadWebSocketLocation} } if config.Origin == nil { return nil, &DialError{config, ErrBadWebSocketOrigin} } + dialer := config.Dialer if dialer == nil { dialer = &net.Dialer{} } - client, err = dialWithDialer(dialer, config) - if err != nil { - goto Error - } - ws, err = NewClient(config, client) + + client, err := dialWithDialer(ctx, dialer, config) if err != nil { - client.Close() - goto Error + return nil, &DialError{config, err} } - return -Error: - return nil, &DialError{config, err} + // Cleanup the connection if we fail to create the websocket successfully + success := false + defer func() { + if !success { + _ = client.Close() + } + }() + + var ws *Conn + var wsErr error + doneConnecting := make(chan struct{}) + go func() { + defer close(doneConnecting) + ws, err = NewClient(config, client) + if err != nil { + wsErr = &DialError{config, err} + } + }() + + // The websocket.NewClient() function can block indefinitely, make sure that we + // respect the deadlines specified by the context. 
+ select { + case <-ctx.Done(): + // Force the pending operations to fail, terminating the pending connection attempt + _ = client.SetDeadline(time.Now()) + <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish + return nil, &DialError{config, ctx.Err()} + case <-doneConnecting: + if wsErr == nil { + success = true // Disarm the deferred connection cleanup + } + return ws, wsErr + } } diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go index 2dab943a..8a2d83c4 100644 --- a/vendor/golang.org/x/net/websocket/dial.go +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -5,18 +5,23 @@ package websocket import ( + "context" "crypto/tls" "net" ) -func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { +func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) { switch config.Location.Scheme { case "ws": - conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) case "wss": - conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + tlsDialer := &tls.Dialer{ + NetDialer: dialer, + Config: config.TlsConfig, + } + conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location)) default: err = ErrBadScheme } diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go index 48a069e1..dda74346 100644 --- a/vendor/golang.org/x/net/websocket/hybi.go +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -16,7 +16,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -279,7 +278,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er } } if header := frame.HeaderReader(); header != nil { - io.Copy(ioutil.Discard, header) + io.Copy(io.Discard, header) } switch frame.PayloadType() { case ContinuationFrame: @@ -294,7 +293,7 @@ func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, er if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return nil, err } - io.Copy(ioutil.Discard, frame) + io.Copy(io.Discard, frame) if frame.PayloadType() == PingFrame { if _, err := handler.WritePong(b[:n]); err != nil { return nil, err diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 90a2257c..923a5780 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -17,7 +17,6 @@ import ( "encoding/json" "errors" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -208,7 +207,7 @@ again: n, err = ws.frameReader.Read(msg) if err == io.EOF { if trailer := ws.frameReader.TrailerReader(); trailer != nil { - io.Copy(ioutil.Discard, trailer) + io.Copy(io.Discard, trailer) } ws.frameReader = nil goto again @@ -330,7 +329,7 @@ func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { ws.rio.Lock() defer ws.rio.Unlock() if ws.frameReader != nil { - _, err = io.Copy(ioutil.Discard, ws.frameReader) + _, err = io.Copy(io.Discard, ws.frameReader) if err != nil { return err } @@ -362,7 +361,7 @@ again: return ErrFrameTooLarge } payloadType := frame.PayloadType() - data, err := ioutil.ReadAll(frame) + data, err := io.ReadAll(frame) if err != nil { return err } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 
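The websocket client.go change above turns DialConfig into a thin wrapper over the new context-aware (*Config).DialContext, which aborts a stalled handshake by forcing a deadline on the underlying connection when the context expires. A usage sketch, assuming the existing websocket.NewConfig helper and a placeholder endpoint URL:

```go
package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/websocket"
)

func main() {
	cfg, err := websocket.NewConfig("wss://example.invalid/echo", "https://example.invalid/")
	if err != nil {
		log.Fatal(err)
	}

	// Bound the whole dial plus handshake; on expiry DialContext returns a
	// *DialError wrapping ctx.Err() and closes the half-open connection.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	ws, err := cfg.DialContext(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer ws.Close()

	if _, err := ws.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
}
```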
00000000..948a3ee6 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,135 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := withCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. +func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. 
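The doc comments above describe the newly vendored errgroup API (WithContext, Go, Wait, TryGo, SetLimit). A short usage sketch with placeholder URLs, showing the first error cancelling the derived context and SetLimit bounding concurrency:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(2) // at most two fetches in flight at once

	urls := []string{"https://example.invalid/a", "https://example.invalid/b", "https://example.invalid/c"}
	for _, url := range urls {
		url := url // capture the loop variable for the goroutine
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err // the first error cancels ctx for the remaining goroutines
			}
			return resp.Body.Close()
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}
```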
+func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 00000000..f93c740b --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 00000000..88ce3343 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 4756ad5f..8fa707aa 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -103,6 +103,7 @@ var ARM64 struct { HasASIMDDP bool // Advanced SIMD double precision instruction set HasSHA512 bool // SHA512 hardware implementation HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f3eb993b..0e27a21e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -28,6 +28,7 @@ func initOptions() { {Name: "sm3", Feature: &ARM64.HasSM3}, {Name: "sm4", Feature: &ARM64.HasSM4}, {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, {Name: "crc32", Feature: &ARM64.HasCRC32}, {Name: "atomics", Feature: &ARM64.HasATOMICS}, {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, @@ -164,6 +165,15 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { switch extractBits(pfr0, 32, 35) { case 1: ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index fcb9a388..22cc9984 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -29,3 +29,11 @@ TEXT ·getpfr0(SB),NOSPLIT,$0-8 WORD $0xd5380400 MOVD R0, ret+0(FP) RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index a8acd3e3..6ac6e1ef 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ 
b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -9,3 +9,4 @@ package cpu func getisar0() uint64 func getisar1() uint64 func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index a968b80f..3d386d0f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,6 +35,8 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + + hwcap2_SVE2 = 1 << 1 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -104,6 +106,9 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdf..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. 
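The x/sys/cpu hunks above add SVE2 detection (the HWCAP2 bit on Linux, ID_AA64ZFR0_EL1 via the new getzfr0 elsewhere) and expose it as ARM64.HasSVE2. A trivial consumer sketch:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// The feature flags are populated by the package's init on arm64;
	// on other architectures they simply remain false.
	switch {
	case cpu.ARM64.HasSVE2:
		fmt.Println("SVE2 supported")
	case cpu.ARM64.HasSVE:
		fmt.Println("SVE supported (no SVE2)")
	default:
		fmt.Println("no SVE support detected")
	}
}
```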
-func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 5627d70e..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index d60ab1b4..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index e7d3df4b..b0e41985 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 2f67ba86..813dfad7 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -9,9 +9,11 @@ #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define EDCHPXV(x) 1016(x) // in the CAA -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA +#define CEECAATHDID(x) 976(x) // in the CAA +#define EDCHPXV(x) 1016(x) // in the CAA +#define GOCB(x) 1104(x) // in the CAA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -19,405 +21,362 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 +// Function Descriptor Offsets +#define __errno 0x156*16 +#define __err2ad 0x16C*16 -TEXT ·clearErrno(SB),NOSPLIT,$0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +// Call Instructions +#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 +#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD +#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE + +DATA zosLibVec<>(SB)/8, $0 +GLOBL zosLibVec<>(SB), NOPTR, $8 + +TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R8 + MOVD EDCHPXV(R8), R8 + MOVD R8, zosLibVec<>(SB) + RET + +TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVD zosLibVec<>(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·clearErrno(SB), NOSPLIT, $0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 +TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(0x156*16), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(__errno), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB), NOSPLIT, $0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + BYTE $0x0D // Branch to function + BYTE $0xEF - // Restore LE stack. 
- MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + SVC_LOAD + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, ret+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + SVC_DELETE + MOVD R2, R15 // Restore go stack pointer - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) +error: + MOVD $0, ret+8(FP) // Return 0 on failure - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) done: + XOR R0, R0 // Reset r0 to 0 RET -TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD fnptr+8(FP), R15 + SVC_DELETE + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, ret+16(FP) // Return SVC return code + RET +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD CEECAATHDID(R9), R9 + MOVD R9, ret+0(FP) - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is -1 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) + NOPH + MOVD R3, ret+32(FP) + CMP R3, $-1 // compare result to -1 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL ·rrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + done: + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall9(SB),NOSPLIT,$0 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is 0 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. 
- MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL + MOVD R3, ret+32(FP) + CMP R3, $0 // compare result to 0 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - RET - -// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB),NOSPLIT,$0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) - - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 - - BYTE $0x0D // Branch to function - BYTE $0xEF - - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 - - RET - -// func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - BYTE $0x0A // SVC 08 LOAD - BYTE $0x08 - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error - - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, addr+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done - - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - BYTE $0x0A // SVC 09 DELETE - BYTE $0x09 - MOVD R2, R15 // Restore go stack pointer + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + XOR R2, R2 + MOVWZ R2, (R3) // clear errno2 -error: - MOVD $0, addr+8(FP) // Return 0 on failure done: - XOR R0, R0 // Reset r0 to 0 + MOVD R4, 0(R9) // Save stack pointer. 
RET -// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 -TEXT ·svcUnload(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD addr+8(FP), R15 - BYTE $0x0A // SVC 09 - BYTE $0x09 - XOR R0, R0 // Reset r0 to 0 - MOVD R15, R1 // Save SVC return code - MOVD R2, R15 // Restore go stack pointer - MOVD R1, rc+0(FP) // Return SVC return code +// +// function to test if a pointer can be safely dereferenced (content read) +// return 0 for succces +// +TEXT ·ptrtest(SB), NOSPLIT, $0-16 + MOVD arg+0(FP), R10 // test pointer in R10 + + // set up R2 to point to CEECAADMC + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + + // set up R5 to point to the "shunt" path which set 1 to R3 (failure) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + + // if r3 is not zero (failed) then branch to finish + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + + // stomic store shunt address in R5 into CEECAADMC + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + + // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above + BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10) + + // finish here, restore 0 into CEECAADMC + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R3, ret+8(FP) // result in R3 RET -// func gettid() uint64 -TEXT ·gettid(SB), NOSPLIT, $0 - // Get library control area (LCA). 
- MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get CEECAATHDID - MOVD CAA(R8), R9 - MOVD 0x3D0(R9), R9 - MOVD R9, ret+0(FP) - +// +// function to test if a untptr can be loaded from a pointer +// return 1: the 8-byte content +// 2: 0 for success, 1 for failure +// +// func safeload(ptr uintptr) ( value uintptr, error uintptr) +TEXT ·safeload(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R10 // test pointer in R10 + MOVD $0x0, R6 + BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208 + BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2 + BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767 + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2) + BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2) + BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3 + BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1 + BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1 + BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3 + BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2 + BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2) + BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10) + BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9 + BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2) + MOVD R6, value+8(FP) // result in R6 + MOVD R3, error+16(FP) // error in R3 RET diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go new file mode 100644 index 00000000..39d647d8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go @@ -0,0 +1,657 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build zos + +package unix + +import ( + "bytes" + "fmt" + "unsafe" +) + +//go:noescape +func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +//go:noescape +func A2e([]byte) + +//go:noescape +func E2a([]byte) + +const ( + BPX4STA = 192 // stat + BPX4FST = 104 // fstat + BPX4LST = 132 // lstat + BPX4OPN = 156 // open + BPX4CLO = 72 // close + BPX4CHR = 500 // chattr + BPX4FCR = 504 // fchattr + BPX4LCR = 1180 // lchattr + BPX4CTW = 492 // cond_timed_wait + BPX4GTH = 1056 // __getthent + BPX4PTQ = 412 // pthread_quiesc + BPX4PTR = 320 // ptrace +) + +const ( + //options + //byte1 + BPX_OPNFHIGH = 0x80 + //byte2 + BPX_OPNFEXEC = 0x80 + //byte3 + BPX_O_NOLARGEFILE = 0x08 + BPX_O_LARGEFILE = 0x04 + BPX_O_ASYNCSIG = 0x02 + BPX_O_SYNC = 0x01 + //byte4 + BPX_O_CREXCL = 0xc0 + BPX_O_CREAT = 0x80 + BPX_O_EXCL = 0x40 + BPX_O_NOCTTY = 0x20 + BPX_O_TRUNC = 0x10 + BPX_O_APPEND = 0x08 + BPX_O_NONBLOCK = 0x04 + BPX_FNDELAY = 0x04 + BPX_O_RDWR = 0x03 + BPX_O_RDONLY = 0x02 + BPX_O_WRONLY = 0x01 + BPX_O_ACCMODE = 0x03 + BPX_O_GETFL = 0x0f + + //mode + // byte1 (file type) + BPX_FT_DIR = 1 + BPX_FT_CHARSPEC = 2 + BPX_FT_REGFILE = 3 + BPX_FT_FIFO = 4 + BPX_FT_SYMLINK = 5 + BPX_FT_SOCKET = 6 + //byte3 + BPX_S_ISUID = 0x08 + BPX_S_ISGID = 0x04 + BPX_S_ISVTX = 0x02 + BPX_S_IRWXU1 = 0x01 + BPX_S_IRUSR = 0x01 + //byte4 + BPX_S_IRWXU2 = 0xc0 + BPX_S_IWUSR = 0x80 + BPX_S_IXUSR = 0x40 + BPX_S_IRWXG = 0x38 + BPX_S_IRGRP = 0x20 + BPX_S_IWGRP = 0x10 + BPX_S_IXGRP = 0x08 + BPX_S_IRWXOX = 0x07 + BPX_S_IROTH = 0x04 + BPX_S_IWOTH = 0x02 + BPX_S_IXOTH = 0x01 + + CW_INTRPT = 1 + CW_CONDVAR = 32 + CW_TIMEOUT = 64 + + PGTHA_NEXT = 2 + PGTHA_CURRENT = 1 + PGTHA_FIRST = 0 + PGTHA_LAST = 3 + PGTHA_PROCESS = 0x80 + PGTHA_CONTTY = 0x40 + PGTHA_PATH = 0x20 + PGTHA_COMMAND = 0x10 + PGTHA_FILEDATA = 0x08 + PGTHA_THREAD = 0x04 + PGTHA_PTAG = 0x02 + PGTHA_COMMANDLONG = 0x01 + PGTHA_THREADFAST = 0x80 + PGTHA_FILEPATH = 0x40 + PGTHA_THDSIGMASK = 0x20 + // thread quiece mode + QUIESCE_TERM int32 = 1 + QUIESCE_FORCE int32 = 2 + QUIESCE_QUERY int32 = 3 + QUIESCE_FREEZE int32 = 4 + QUIESCE_UNFREEZE int32 = 5 + FREEZE_THIS_THREAD int32 = 6 + FREEZE_EXIT int32 = 8 + QUIESCE_SRB int32 = 9 +) + +type Pgtha struct { + Pid uint32 // 0 + Tid0 uint32 // 4 + Tid1 uint32 + Accesspid byte // C + Accesstid byte // D + Accessasid uint16 // E + Loginname [8]byte // 10 + Flag1 byte // 18 + Flag1b2 byte // 19 +} + +type Bpxystat_t struct { // DSECT BPXYSTAT + St_id [4]uint8 // 0 + St_length uint16 // 0x4 + St_version uint16 // 0x6 + St_mode uint32 // 0x8 + St_ino uint32 // 0xc + St_dev uint32 // 0x10 + St_nlink uint32 // 0x14 + St_uid uint32 // 0x18 + St_gid uint32 // 0x1c + St_size uint64 // 0x20 + St_atime uint32 // 0x28 + St_mtime uint32 // 0x2c + St_ctime uint32 // 0x30 + St_rdev uint32 // 0x34 + St_auditoraudit uint32 // 0x38 + St_useraudit uint32 // 0x3c + St_blksize uint32 // 0x40 + St_createtime uint32 // 0x44 + St_auditid [4]uint32 // 0x48 + St_res01 uint32 // 0x58 + Ft_ccsid uint16 // 0x5c + Ft_flags uint16 // 0x5e + St_res01a [2]uint32 // 0x60 + St_res02 uint32 // 0x68 + St_blocks uint32 // 0x6c + St_opaque [3]uint8 // 0x70 + St_visible uint8 // 0x73 + St_reftime uint32 // 0x74 + St_fid uint64 // 0x78 + St_filefmt uint8 // 0x80 + St_fspflag2 uint8 // 0x81 + St_res03 [2]uint8 // 0x82 + St_ctimemsec uint32 // 0x84 + St_seclabel [8]uint8 // 0x88 + St_res04 [4]uint8 // 0x90 + // end of version 1 + _ uint32 // 0x94 + St_atime64 uint64 // 0x98 + St_mtime64 uint64 // 0xa0 + St_ctime64 uint64 // 0xa8 + St_createtime64 uint64 // 0xb0 + 
St_reftime64 uint64 // 0xb8 + _ uint64 // 0xc0 + St_res05 [16]uint8 // 0xc8 + // end of version 2 +} + +type BpxFilestatus struct { + Oflag1 byte + Oflag2 byte + Oflag3 byte + Oflag4 byte +} + +type BpxMode struct { + Ftype byte + Mode1 byte + Mode2 byte + Mode3 byte +} + +// Thr attribute structure for extended attributes +type Bpxyatt_t struct { // DSECT BPXYATT + Att_id [4]uint8 + Att_version uint16 + Att_res01 [2]uint8 + Att_setflags1 uint8 + Att_setflags2 uint8 + Att_setflags3 uint8 + Att_setflags4 uint8 + Att_mode uint32 + Att_uid uint32 + Att_gid uint32 + Att_opaquemask [3]uint8 + Att_visblmaskres uint8 + Att_opaque [3]uint8 + Att_visibleres uint8 + Att_size_h uint32 + Att_size_l uint32 + Att_atime uint32 + Att_mtime uint32 + Att_auditoraudit uint32 + Att_useraudit uint32 + Att_ctime uint32 + Att_reftime uint32 + // end of version 1 + Att_filefmt uint8 + Att_res02 [3]uint8 + Att_filetag uint32 + Att_res03 [8]uint8 + // end of version 2 + Att_atime64 uint64 + Att_mtime64 uint64 + Att_ctime64 uint64 + Att_reftime64 uint64 + Att_seclabel [8]uint8 + Att_ver3res02 [8]uint8 + // end of version 3 +} + +func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(options) + parms[3] = unsafe.Pointer(mode) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4OPN) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxClose(fd int32) (rv int32, rc int32, rn int32) { + var parms [4]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&rv) + parms[2] = unsafe.Pointer(&rc) + parms[3] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CLO) + return rv, rc, rn +} + +func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&stat_sz) + parms[2] = unsafe.Pointer(st) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FST) + return rv, rc, rn +} + +func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4STA) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = 
unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LST) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CHR) + return rv, rc, rn +} + +func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LCR) + return rv, rc, rn +} + +func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&attr_sz) + parms[2] = unsafe.Pointer(attr) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FCR) + return rv, rc, rn +} + +func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&sec) + parms[1] = unsafe.Pointer(&nsec) + parms[2] = unsafe.Pointer(&events) + parms[3] = unsafe.Pointer(secrem) + parms[4] = unsafe.Pointer(nsecrem) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CTW) + return rv, rc, rn +} +func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [7]unsafe.Pointer + inlen := uint32(26) // nothing else will work. 
Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte + parms[0] = unsafe.Pointer(&inlen) + parms[1] = unsafe.Pointer(&in) + parms[2] = unsafe.Pointer(outlen) + parms[3] = unsafe.Pointer(&out) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4GTH) + return rv, rc, rn +} +func ZosJobname() (jobname string, err error) { + var pgtha Pgtha + pgtha.Pid = uint32(Getpid()) + pgtha.Accesspid = PGTHA_CURRENT + pgtha.Flag1 = PGTHA_PROCESS + var out [256]byte + var outlen uint32 + outlen = 256 + rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) + if rv == 0 { + gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic + ix := bytes.Index(out[:], gthc) + if ix == -1 { + err = fmt.Errorf("BPX4GTH: gthc return data not found") + return + } + jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 + E2a(jn) + jobname = string(bytes.TrimRight(jn, " ")) + + } else { + err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) + } + return +} +func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { + var userdata [8]byte + var parms [5]unsafe.Pointer + copy(userdata[:], data+" ") + A2e(userdata[:]) + parms[0] = unsafe.Pointer(&code) + parms[1] = unsafe.Pointer(&userdata[0]) + parms[2] = unsafe.Pointer(&rv) + parms[3] = unsafe.Pointer(&rc) + parms[4] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTQ) + return rv, rc, rn +} + +const ( + PT_TRACE_ME = 0 // Debug this process + PT_READ_I = 1 // Read a full word + PT_READ_D = 2 // Read a full word + PT_READ_U = 3 // Read control info + PT_WRITE_I = 4 //Write a full word + PT_WRITE_D = 5 //Write a full word + PT_CONTINUE = 7 //Continue the process + PT_KILL = 8 //Terminate the process + PT_READ_GPR = 11 // Read GPR, CR, PSW + PT_READ_FPR = 12 // Read FPR + PT_READ_VR = 13 // Read VR + PT_WRITE_GPR = 14 // Write GPR, CR, PSW + PT_WRITE_FPR = 15 // Write FPR + PT_WRITE_VR = 16 // Write VR + PT_READ_BLOCK = 17 // Read storage + PT_WRITE_BLOCK = 19 // Write storage + PT_READ_GPRH = 20 // Read GPRH + PT_WRITE_GPRH = 21 // Write GPRH + PT_REGHSET = 22 // Read all GPRHs + PT_ATTACH = 30 // Attach to a process + PT_DETACH = 31 // Detach from a process + PT_REGSET = 32 // Read all GPRs + PT_REATTACH = 33 // Reattach to a process + PT_LDINFO = 34 // Read loader info + PT_MULTI = 35 // Multi process mode + PT_LD64INFO = 36 // RMODE64 Info Area + PT_BLOCKREQ = 40 // Block request + PT_THREAD_INFO = 60 // Read thread info + PT_THREAD_MODIFY = 61 + PT_THREAD_READ_FOCUS = 62 + PT_THREAD_WRITE_FOCUS = 63 + PT_THREAD_HOLD = 64 + PT_THREAD_SIGNAL = 65 + PT_EXPLAIN = 66 + PT_EVENTS = 67 + PT_THREAD_INFO_EXTENDED = 68 + PT_REATTACH2 = 71 + PT_CAPTURE = 72 + PT_UNCAPTURE = 73 + PT_GET_THREAD_TCB = 74 + PT_GET_ALET = 75 + PT_SWAPIN = 76 + PT_EXTENDED_EVENT = 98 + PT_RECOVER = 99 // Debug a program check + PT_GPR0 = 0 // General purpose register 0 + PT_GPR1 = 1 // General purpose register 1 + PT_GPR2 = 2 // General purpose register 2 + PT_GPR3 = 3 // General purpose register 3 + PT_GPR4 = 4 // General purpose register 4 + PT_GPR5 = 5 // General purpose register 5 + PT_GPR6 = 6 // General purpose register 6 + PT_GPR7 = 7 // General purpose register 7 + PT_GPR8 = 8 // General purpose register 8 + PT_GPR9 = 9 // General purpose register 9 + PT_GPR10 = 10 // General purpose register 10 + PT_GPR11 = 11 // General purpose register 11 + PT_GPR12 = 12 // General purpose register 12 + PT_GPR13 = 13 // General 
purpose register 13 + PT_GPR14 = 14 // General purpose register 14 + PT_GPR15 = 15 // General purpose register 15 + PT_FPR0 = 16 // Floating point register 0 + PT_FPR1 = 17 // Floating point register 1 + PT_FPR2 = 18 // Floating point register 2 + PT_FPR3 = 19 // Floating point register 3 + PT_FPR4 = 20 // Floating point register 4 + PT_FPR5 = 21 // Floating point register 5 + PT_FPR6 = 22 // Floating point register 6 + PT_FPR7 = 23 // Floating point register 7 + PT_FPR8 = 24 // Floating point register 8 + PT_FPR9 = 25 // Floating point register 9 + PT_FPR10 = 26 // Floating point register 10 + PT_FPR11 = 27 // Floating point register 11 + PT_FPR12 = 28 // Floating point register 12 + PT_FPR13 = 29 // Floating point register 13 + PT_FPR14 = 30 // Floating point register 14 + PT_FPR15 = 31 // Floating point register 15 + PT_FPC = 32 // Floating point control register + PT_PSW = 40 // PSW + PT_PSW0 = 40 // Left half of the PSW + PT_PSW1 = 41 // Right half of the PSW + PT_CR0 = 42 // Control register 0 + PT_CR1 = 43 // Control register 1 + PT_CR2 = 44 // Control register 2 + PT_CR3 = 45 // Control register 3 + PT_CR4 = 46 // Control register 4 + PT_CR5 = 47 // Control register 5 + PT_CR6 = 48 // Control register 6 + PT_CR7 = 49 // Control register 7 + PT_CR8 = 50 // Control register 8 + PT_CR9 = 51 // Control register 9 + PT_CR10 = 52 // Control register 10 + PT_CR11 = 53 // Control register 11 + PT_CR12 = 54 // Control register 12 + PT_CR13 = 55 // Control register 13 + PT_CR14 = 56 // Control register 14 + PT_CR15 = 57 // Control register 15 + PT_GPRH0 = 58 // GP High register 0 + PT_GPRH1 = 59 // GP High register 1 + PT_GPRH2 = 60 // GP High register 2 + PT_GPRH3 = 61 // GP High register 3 + PT_GPRH4 = 62 // GP High register 4 + PT_GPRH5 = 63 // GP High register 5 + PT_GPRH6 = 64 // GP High register 6 + PT_GPRH7 = 65 // GP High register 7 + PT_GPRH8 = 66 // GP High register 8 + PT_GPRH9 = 67 // GP High register 9 + PT_GPRH10 = 68 // GP High register 10 + PT_GPRH11 = 69 // GP High register 11 + PT_GPRH12 = 70 // GP High register 12 + PT_GPRH13 = 71 // GP High register 13 + PT_GPRH14 = 72 // GP High register 14 + PT_GPRH15 = 73 // GP High register 15 + PT_VR0 = 74 // Vector register 0 + PT_VR1 = 75 // Vector register 1 + PT_VR2 = 76 // Vector register 2 + PT_VR3 = 77 // Vector register 3 + PT_VR4 = 78 // Vector register 4 + PT_VR5 = 79 // Vector register 5 + PT_VR6 = 80 // Vector register 6 + PT_VR7 = 81 // Vector register 7 + PT_VR8 = 82 // Vector register 8 + PT_VR9 = 83 // Vector register 9 + PT_VR10 = 84 // Vector register 10 + PT_VR11 = 85 // Vector register 11 + PT_VR12 = 86 // Vector register 12 + PT_VR13 = 87 // Vector register 13 + PT_VR14 = 88 // Vector register 14 + PT_VR15 = 89 // Vector register 15 + PT_VR16 = 90 // Vector register 16 + PT_VR17 = 91 // Vector register 17 + PT_VR18 = 92 // Vector register 18 + PT_VR19 = 93 // Vector register 19 + PT_VR20 = 94 // Vector register 20 + PT_VR21 = 95 // Vector register 21 + PT_VR22 = 96 // Vector register 22 + PT_VR23 = 97 // Vector register 23 + PT_VR24 = 98 // Vector register 24 + PT_VR25 = 99 // Vector register 25 + PT_VR26 = 100 // Vector register 26 + PT_VR27 = 101 // Vector register 27 + PT_VR28 = 102 // Vector register 28 + PT_VR29 = 103 // Vector register 29 + PT_VR30 = 104 // Vector register 30 + PT_VR31 = 105 // Vector register 31 + PT_PSWG = 106 // PSWG + PT_PSWG0 = 106 // Bytes 0-3 + PT_PSWG1 = 107 // Bytes 4-7 + PT_PSWG2 = 108 // Bytes 8-11 (IA high word) + PT_PSWG3 = 109 // Bytes 12-15 (IA low word) +) + +func 
Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&request) + parms[1] = unsafe.Pointer(&pid) + parms[2] = unsafe.Pointer(&addr) + parms[3] = unsafe.Pointer(&data) + parms[4] = unsafe.Pointer(&buffer) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTR) + return rv, rc, rn +} + +func copyU8(val uint8, dest []uint8) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU8Arr(src, dest []uint8) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU16(val uint16, dest []uint16) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32(val uint32, dest []uint32) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32Arr(src, dest []uint32) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU64(val uint64, dest []uint64) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s new file mode 100644 index 00000000..4bd4a179 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s @@ -0,0 +1,192 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +// function to call USS assembly language services +// +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm +// +// arg1 unsafe.Pointer array that ressembles an OS PLIST +// +// arg2 function offset as in +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm +// +// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 + MOVD plist_base+0(FP), R1 // r1 points to plist + MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table + MOVD R14, R7 // save r14 + MOVD R15, R8 // save r15 + MOVWZ 16(R0), R9 + MOVWZ 544(R9), R9 + MOVWZ 24(R9), R9 // call vector in r9 + ADD R2, R9 // add offset to vector table + MOVWZ (R9), R9 // r9 points to entry point + BYTE $0x0D // BL R14,R9 --> basr r14,r9 + BYTE $0xE9 // clobbers 0,1,14,15 + MOVD R8, R15 // restore 15 + JMP R7 // return via saved return address + +// func A2e(arr [] byte) +// code page conversion from 819 to 1047 +TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // ASCII -> EBCDIC conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f + BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 + BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b + BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d + BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e + BYTE $0x6b; BYTE 
$0x60; BYTE $0x4b; BYTE $0x61 + BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 + BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 + BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e + BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f + BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 + BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 + BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 + BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 + BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 + BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 + BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad + BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d + BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 + BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 + BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 + BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 + BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 + BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 + BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 + BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 + BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 + BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b + BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b + BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 + BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b + BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff + BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 + BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 + BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a + BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc + BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa + BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 + BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b + BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab + BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 + BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 + BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 + BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 + BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee + BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf + BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb + BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 + BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 + BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 + BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 + BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 + BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce + BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 + BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb + BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET + +// func e2a(arr [] byte) +// code page conversion from 1047 to 819 +TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // EBCDIC -> ASCII conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f + BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 + BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b + BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b + BYTE $0x8c; BYTE $0x05; BYTE 
$0x06; BYTE $0x07 + BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 + BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 + BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b + BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a + BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 + BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 + BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e + BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c + BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb + BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef + BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 + BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e + BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 + BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 + BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c + BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f + BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb + BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf + BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 + BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 + BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 + BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 + BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb + BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 + BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c + BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 + BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba + BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 + BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 + BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 + BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf + BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae + BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 + BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc + BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 + BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 + BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 + BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 + BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 + BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 + BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c + BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 + BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb + BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff + BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 + BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 + BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 + BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 + BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 + BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb + BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go deleted file mode 100644 index 7753fdde..00000000 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "sync" -) - -// This file simulates epoll on z/OS using poll. - -// Analogous to epoll_event on Linux. -// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? 
-type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -const ( - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDNORM = 0x40 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - // The following constants are part of the epoll API, but represent - // currently unsupported functionality on z/OS. - // EPOLL_CLOEXEC = 0x80000 - // EPOLLET = 0x80000000 - // EPOLLONESHOT = 0x40000000 - // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis - // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode - // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability -) - -// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL -// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). - -// epToPollEvt converts epoll event field to poll equivalent. -// In epoll, Events is a 32-bit field, while poll uses 16 bits. -func epToPollEvt(events uint32) int16 { - var ep2p = map[uint32]int16{ - EPOLLIN: POLLIN, - EPOLLOUT: POLLOUT, - EPOLLHUP: POLLHUP, - EPOLLPRI: POLLPRI, - EPOLLERR: POLLERR, - } - - var pollEvts int16 = 0 - for epEvt, pEvt := range ep2p { - if (events & epEvt) != 0 { - pollEvts |= pEvt - } - } - - return pollEvts -} - -// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. -func pToEpollEvt(revents int16) uint32 { - var p2ep = map[int16]uint32{ - POLLIN: EPOLLIN, - POLLOUT: EPOLLOUT, - POLLHUP: EPOLLHUP, - POLLPRI: EPOLLPRI, - POLLERR: EPOLLERR, - } - - var epollEvts uint32 = 0 - for pEvt, epEvt := range p2ep { - if (revents & pEvt) != 0 { - epollEvts |= epEvt - } - } - - return epollEvts -} - -// Per-process epoll implementation. -type epollImpl struct { - mu sync.Mutex - epfd2ep map[int]*eventPoll - nextEpfd int -} - -// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. -// On Linux, this is an in-kernel data structure accessed through a fd. -type eventPoll struct { - mu sync.Mutex - fds map[int]*EpollEvent -} - -// epoll impl for this process. -var impl epollImpl = epollImpl{ - epfd2ep: make(map[int]*eventPoll), - nextEpfd: 0, -} - -func (e *epollImpl) epollcreate(size int) (epfd int, err error) { - e.mu.Lock() - defer e.mu.Unlock() - epfd = e.nextEpfd - e.nextEpfd++ - - e.epfd2ep[epfd] = &eventPoll{ - fds: make(map[int]*EpollEvent), - } - return epfd, nil -} - -func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { - return e.epollcreate(4) -} - -func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - ep, ok := e.epfd2ep[epfd] - if !ok { - - return EBADF - } - - switch op { - case EPOLL_CTL_ADD: - // TODO(neeilan): When we make epfds and fds disjoint, detect epoll - // loops here (instances watching each other) and return ELOOP. 
- if _, ok := ep.fds[fd]; ok { - return EEXIST - } - ep.fds[fd] = event - case EPOLL_CTL_MOD: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - ep.fds[fd] = event - case EPOLL_CTL_DEL: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - delete(ep.fds, fd) - - } - return nil -} - -// Must be called while holding ep.mu -func (ep *eventPoll) getFds() []int { - fds := make([]int, len(ep.fds)) - for fd := range ep.fds { - fds = append(fds, fd) - } - return fds -} - -func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { - e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait - ep, ok := e.epfd2ep[epfd] - - if !ok { - e.mu.Unlock() - return 0, EBADF - } - - pollfds := make([]PollFd, 4) - for fd, epollevt := range ep.fds { - pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) - } - e.mu.Unlock() - - n, err = Poll(pollfds, msec) - if err != nil { - return n, err - } - - i := 0 - for _, pFd := range pollfds { - if pFd.Revents != 0 { - events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} - i++ - } - - if i == n { - break - } - } - - return n, nil -} - -func EpollCreate(size int) (fd int, err error) { - return impl.epollcreate(size) -} - -func EpollCreate1(flag int) (fd int, err error) { - return impl.epollcreate1(flag) -} - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return impl.epollctl(epfd, op, fd, event) -} - -// Because EpollWait mutates events, the caller is expected to coordinate -// concurrent access if calling with the same epfd from multiple goroutines. -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return impl.epollwait(epfd, events, msec) -} diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go deleted file mode 100644 index c8bde601..00000000 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "unsafe" -) - -// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. 
- -func Fstatfs(fd int, stat *Statfs_t) (err error) { - var stat_v Statvfs_t - err = Fstatvfs(fd, &stat_v) - if err == nil { - // populate stat - stat.Type = 0 - stat.Bsize = stat_v.Bsize - stat.Blocks = stat_v.Blocks - stat.Bfree = stat_v.Bfree - stat.Bavail = stat_v.Bavail - stat.Files = stat_v.Files - stat.Ffree = stat_v.Ffree - stat.Fsid = stat_v.Fsid - stat.Namelen = stat_v.Namemax - stat.Frsize = stat_v.Frsize - stat.Flags = stat_v.Flag - for passn := 0; passn < 5; passn++ { - switch passn { - case 0: - err = tryGetmntent64(stat) - break - case 1: - err = tryGetmntent128(stat) - break - case 2: - err = tryGetmntent256(stat) - break - case 3: - err = tryGetmntent512(stat) - break - case 4: - err = tryGetmntent1024(stat) - break - default: - break - } - //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) - if err == nil || err != nil && err != ERANGE { - break - } - } - } - return err -} - -func tryGetmntent64(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [64]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent128(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [128]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent256(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [256]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent512(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [512]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent1024(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [1024]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), 
buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974..4ed2e488 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -263,6 +263,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -549,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e597..7f602ffd 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 4d0a3430..0482408d 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 130398b6..b903c006 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin +//go:build darwin || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go new file mode 100644 index 00000000..3e53dbc0 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go @@ -0,0 +1,58 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Socket control messages + +package unix + +import "unsafe" + +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, CmsgSpace(SizeofUcred)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_CREDENTIALS + h.SetLen(CmsgLen(SizeofUcred)) + *(*Ucred)(h.data(0)) = *ucred + return b +} + +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. 
To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_CREDENTIALS { + return nil, EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s new file mode 100644 index 00000000..3c4f33cb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s @@ -0,0 +1,75 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc + +#include "textflag.h" + +// provide the address of function variable to be fixed up. + +TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Pipe2(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flock(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Nanosleep(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Wait4(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unmount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNanoAt(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNano(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkfifoat(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Chtag(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Readlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 16dc6993..2f0fa76e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build darwin && go1.12 +//go:build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 64d1bb4d..2b57e0f7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -13,6 +13,7 @@ package unix import ( + "errors" "sync" "unsafe" ) @@ -169,25 +170,26 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) - if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil { + // Suppress ENOMEM errors to be compatible with the C library __xuname() implementation. + if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_HOSTNAME} n = unsafe.Sizeof(uname.Nodename) - if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_OSRELEASE} n = unsafe.Sizeof(uname.Release) - if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } mib = []_C_int{CTL_KERN, KERN_VERSION} n = unsafe.Sizeof(uname.Version) - if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } @@ -205,7 +207,7 @@ func Uname(uname *Utsname) error { mib = []_C_int{CTL_HW, HW_MACHINE} n = unsafe.Sizeof(uname.Machine) - if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil { + if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil && !errors.Is(err, ENOMEM) { return err } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0f85e29e..5682e262 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1849,6 +1849,105 @@ func Dup2(oldfd, newfd int) error { //sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) //sys Fsopen(fsName string, flags int) (fd int, err error) //sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) + +//sys fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) + +func fsconfigCommon(fd int, cmd uint, key string, value *byte, aux int) (err error) { + var keyp *byte + if keyp, err = BytePtrFromString(key); err != nil { + return + } + return fsconfig(fd, cmd, keyp, value, aux) +} + +// FsconfigSetFlag is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FLAG. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +func FsconfigSetFlag(fd int, key string) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FLAG, key, nil, 0) +} + +// FsconfigSetString is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_STRING. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetString(fd int, key string, value string) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(value); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_STRING, key, valuep, 0) +} + +// FsconfigSetBinary is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_BINARY. 
+// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is the parameter value to set. +func FsconfigSetBinary(fd int, key string, value []byte) (err error) { + if len(value) == 0 { + return EINVAL + } + return fsconfigCommon(fd, FSCONFIG_SET_BINARY, key, &value[0], len(value)) +} + +// FsconfigSetPath is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// path is a non-empty path for specified key. +// atfd is a file descriptor at which to start lookup from or AT_FDCWD. +func FsconfigSetPath(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH, key, valuep, atfd) +} + +// FsconfigSetPathEmpty is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_PATH_EMPTY. The same as +// FconfigSetPath but with AT_PATH_EMPTY implied. +func FsconfigSetPathEmpty(fd int, key string, path string, atfd int) (err error) { + var valuep *byte + if valuep, err = BytePtrFromString(path); err != nil { + return + } + return fsconfigCommon(fd, FSCONFIG_SET_PATH_EMPTY, key, valuep, atfd) +} + +// FsconfigSetFd is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_SET_FD. +// +// fd is the filesystem context to act upon. +// key the parameter key to set. +// value is a file descriptor to be assigned to specified key. +func FsconfigSetFd(fd int, key string, value int) (err error) { + return fsconfigCommon(fd, FSCONFIG_SET_FD, key, nil, value) +} + +// FsconfigCreate is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_CREATE. +// +// fd is the filesystem context to act upon. +func FsconfigCreate(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_CREATE, nil, nil, 0) +} + +// FsconfigReconfigure is equivalent to fsconfig(2) called +// with cmd == FSCONFIG_CMD_RECONFIGURE. +// +// fd is the filesystem context to act upon. +func FsconfigReconfigure(fd int) (err error) { + return fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, nil, nil, 0) +} + //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c..312ae6ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -4,11 +4,21 @@ //go:build zos && s390x +// Many of the following syscalls are not available on all versions of z/OS. +// Some missing calls have legacy implementations/simulations but others +// will be missing completely. To achieve consistent failing behaviour on +// legacy systems, we first test the function pointer via a safeloading +// mechanism to see if the function exists on a given system. Then execution +// is branched to either continue the function call, or return an error. 
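The block comment above describes the capability-probing scheme these z/OS wrappers use: an exported function variable starts out pointing at an "enter" stub, the stub probes the target LE function descriptor once (via funcptrtest/safeload), and then rewrites the variable to either the real implementation or an ENOSYS-style shim; the Getxattr, Setxattr and Pipe2 wrappers later in this file follow exactly that shape. Below is a minimal, self-contained sketch of just the dispatch pattern, using hypothetical names (probeSyscall and a simplified Getxattr signature) rather than the actual x/sys internals.

package main

import (
	"errors"
	"fmt"
)

// probeSyscall stands in for the funcptrtest/safeload checks described above:
// it reports whether the underlying LE function exists on this system.
// (Hypothetical placeholder, not part of the diff.)
func probeSyscall(name string) bool {
	return name == "getxattr"
}

// Getxattr starts life pointing at the "enter" stub; the first call replaces
// it with either the real implementation or an error shim.
var Getxattr func(path, attr string) (int, error)

func init() { Getxattr = enterGetxattr }

func enterGetxattr(path, attr string) (int, error) {
	if probeSyscall("getxattr") {
		Getxattr = implGetxattr // supported: dispatch directly from now on
	} else {
		Getxattr = errorGetxattr // consistent failure on legacy systems
	}
	return Getxattr(path, attr)
}

func implGetxattr(path, attr string) (int, error) {
	// The real wrapper would call through the LE library vector here.
	return 0, nil
}

func errorGetxattr(path, attr string) (int, error) {
	return -1, errors.New("getxattr is not available on this system (ENOSYS)")
}

func main() {
	n, err := Getxattr("/tmp/file", "user.tag")
	fmt.Println(n, err)
}

The vendored code reaches the function variable through an assembly-provided address helper (get_GetxattrAddr) rather than assigning to it by name; the sketch uses an init function instead simply to stay self-contained, but the control flow is the same.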
+ package unix import ( "bytes" "fmt" + "os" + "reflect" + "regexp" "runtime" "sort" "strings" @@ -17,17 +27,205 @@ import ( "unsafe" ) +//go:noescape +func initZosLibVec() + +//go:noescape +func GetZosLibVec() uintptr + +func init() { + initZosLibVec() + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) + if r0 != 0 { + n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + ZosTraceLevel = int(n) + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) + if r0 != 0 { + fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + f := os.NewFile(fd, "zostracefile") + if f != nil { + ZosTracefile = f + } + } + + } +} + +//go:noescape +func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +//go:noescape +func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +// ------------------------------- +// pointer validity test +// good pointer returns 0 +// bad pointer returns 1 +// +//go:nosplit +func ptrtest(uintptr) uint64 + +// Load memory at ptr location with error handling if the location is invalid +// +//go:noescape +func safeload(ptr uintptr) (value uintptr, error uintptr) + const ( - O_CLOEXEC = 0 // Dummy value (not supported). - AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX + entrypointLocationOffset = 8 // From function descriptor + + xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" + eyecatcherOffset = 16 // From function entrypoint (negative) + ppa1LocationOffset = 8 // From function entrypoint (negative) + + nameLenOffset = 0x14 // From PPA1 start + nameOffset = 0x16 // From PPA1 start ) -func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func getPpaOffset(funcptr uintptr) int64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return -1 + } + + // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) + val, err := safeload(entrypoint - eyecatcherOffset) + if err != 0 { + return -1 + } + if val != xplinkEyecatcher { + return -1 + } + + ppaoff, err := safeload(entrypoint - ppa1LocationOffset) + if err != 0 { + return -1 + } + + ppaoff >>= 32 + return int64(ppaoff) +} + +//------------------------------- +// function descriptor pointer validity test +// good pointer returns 0 +// bad pointer returns 1 + +// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName +// have correct funcName pass to the funcptrtest function +func funcptrtest(funcptr uintptr, funcName string) uint64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return 1 + } + + ppaoff := getPpaOffset(funcptr) + if ppaoff == -1 { + return 1 + } + + // PPA1 offset value is from the start of the entire function block, not the entrypoint + ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) + + nameLen, err := safeload(ppa1 + nameLenOffset) + if err != 
0 { + return 1 + } + + nameLen >>= 48 + if nameLen > 128 { + return 1 + } + + // no function name input to argument end here + if funcName == "" { + return 0 + } + + var funcname [128]byte + for i := 0; i < int(nameLen); i += 8 { + v, err := safeload(ppa1 + nameOffset + uintptr(i)) + if err != 0 { + return 1 + } + funcname[i] = byte(v >> 56) + funcname[i+1] = byte(v >> 48) + funcname[i+2] = byte(v >> 40) + funcname[i+3] = byte(v >> 32) + funcname[i+4] = byte(v >> 24) + funcname[i+5] = byte(v >> 16) + funcname[i+6] = byte(v >> 8) + funcname[i+7] = byte(v) + } + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) + + name := string(funcname[:nameLen]) + if name != funcName { + return 1 + } + + return 0 +} + +// For detection of capabilities on a system. +// Is function descriptor f a valid function? +func isValidLeFunc(f uintptr) error { + ret := funcptrtest(f, "") + if ret != 0 { + return fmt.Errorf("Bad pointer, not an LE function ") + } + return nil +} + +// Retrieve function name from descriptor +func getLeFuncName(f uintptr) (string, error) { + // assume it has been checked, only check ppa1 validity here + entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] + preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) + + offsetPpa1 := preamp[2] + if offsetPpa1 > 0x0ffff { + return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) + } + + ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) + res := ptrtest(ppa1) + if res != 0 { + return "", fmt.Errorf("PPA1 address not valid") + } + + size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) + if size > 128 { + return "", fmt.Errorf("Function name seems too long, length=%d\n", size) + } + + var name [128]byte + funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) + copy(name[0:size], funcname[0:size]) + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) + + return string(name[:size]), nil +} + +// Check z/OS version +func zosLeVersion() (version, release uint32) { + p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) + vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) + version = (vrm & 0x00ff0000) >> 16 + release = (vrm & 0x0000ff00) >> 8 + return +} + +// returns a zos C FILE * for stdio fd 0, 1, 2 +func ZosStdioFilep(fd int32) uintptr { + return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) +} func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -65,6 +263,21 @@ func (d *Dirent) NameString() string { } } +func DecodeData(dest []byte, sz int, val uint64) { + for i := 0; i < sz; i++ { + dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) + } +} + +func EncodeData(data []byte) uint64 { + var value uint64 + sz := len(data) + for i := 0; i < sz; i++ { + value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) + } + return value +} + func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -74,7 +287,9 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port 
>> 8) p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -88,7 +303,9 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -146,7 +363,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil case AF_INET6: @@ -155,7 +374,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil } return nil, EAFNOSUPPORT @@ -177,6 +398,43 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Ctermid() (tty string, err error) { + var termdev [1025]byte + runtime.EnterSyscall() + r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) + runtime.ExitSyscall() + if r0 == 0 { + return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + s := string(termdev[:]) + idx := strings.Index(s, string(rune(0))) + if idx == -1 { + tty = s + } else { + tty = s[:idx] + } + return +} + func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -190,10 +448,16 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A +//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A +//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -204,6 +468,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys Removexattr(path string, attr string) (err 
error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A @@ -212,6 +477,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP //sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL //sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 +//sys shmdt(addr uintptr) (err error) = SYS_SHMDT +//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -220,14 +489,31 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 +//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD +//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE +//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 +//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL +//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A + +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + return Faccessat(dirfd, path, mode, flags) +} + //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) +//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -236,28 +522,208 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var statLE Stat_LE_t + err = fstatat(dirfd, path, &statLE, flags) + copyStat(stat, &statLE) + return +} + +func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetxattrAddr() *(func(path string, attr string, dest []byte) (sz int, err error)) + +var Getxattr = enter_Getxattr + +func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + funcref := get_GetxattrAddr() + if validGetxattr() { + *funcref = impl_Getxattr + } else { + *funcref = error_Getxattr + } + return (*funcref)(path, attr, dest) +} + +func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return -1, ENOSYS +} + +func validGetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { + return name == "__getxattr_a" + } + } + return false +} + +//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A + +func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Setxattr = enter_Setxattr + +func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_SetxattrAddr() + if validSetxattr() { + *funcref = impl_Setxattr + } else { + *funcref = error_Setxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { + return ENOSYS +} + +func validSetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { + return name == "__setxattr_a" + } + } + return false +} + +//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) +//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES +//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM +//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT +//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A +//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH +//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A +//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A +//sys 
Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A +//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 + +// Pipe2 begin + +//go:nosplit +func getPipe2Addr() *(func([]int, int) error) + +var Pipe2 = pipe2Enter + +func pipe2Enter(p []int, flags int) (err error) { + if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { + *getPipe2Addr() = pipe2Impl + } else { + *getPipe2Addr() = pipe2Error + } + return (*getPipe2Addr())(p, flags) +} + +func pipe2Impl(p []int, flags int) (err error) { + var pp [2]_C_int + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } else { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return +} +func pipe2Error(p []int, flags int) (err error) { + return fmt.Errorf("Pipe2 is not available on this system") +} + +// Pipe2 end + //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL + +func Readdir(dir uintptr) (dirent *Dirent, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) + runtime.ExitSyscall() + dirent = (*Dirent)(unsafe.Pointer(r0)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A +//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A +//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A +//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A +//sys Unshare(flags int) (err error) = SYS_UNSHARE func Ptsname(fd int) (name string, err error) { - r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) - name = u2s(unsafe.Pointer(r0)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) + runtime.ExitSyscall() + if r0 == 0 { + err = errnoErr2(e1, e2) + } else { + name = u2s(unsafe.Pointer(r0)) } return } @@ -272,13 +738,19 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() for i := 0; e1 == EAGAIN && i < 10; i++ { - _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) - _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) + runtime.ExitSyscall() + runtime.EnterSyscall() + r0, e2, 
e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() } - if e1 != 0 { - err = errnoErr(e1) + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -288,9 +760,15 @@ func Madvise(b []byte, advice int) (err error) { return } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A -//sysnb Getegid() (egid int) -//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -317,11 +795,14 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +//sys Getegid() (egid int) = SYS_GETEGID +//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -332,15 +813,150 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ +func isSpecialPath(path []byte) (v bool) { + var special = [4][8]byte{ + [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + + var i, j int + for i = 0; i < len(special); i++ { + for j = 0; j < len(special[i]); j++ { + if path[j] != special[i][j] { + break + } + } + if j == len(special[i]) { + return true + } + } + return false +} + +func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { + var source [1024]byte + copy(source[:], srcpath) + source[len(srcpath)] = 0 + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() + []uintptr{uintptr(unsafe.Pointer(&source[0])), + uintptr(unsafe.Pointer(&abspath[0]))}) + if ret != 0 { + index := bytes.IndexByte(abspath[:], byte(0)) + if index != -1 { + return index, 0 + } + } else { + errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() + return 0, *errptr + } + return 0, 245 // EBADDATA 245 +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, + []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) + runtime.KeepAlive(unsafe.Pointer(_p0)) + if n == -1 { + value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) + err = errnoErr(Errno(value)) + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + return n, err + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +//go:nosplit +func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) + +var Readlinkat = enter_Readlinkat + +func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + funcref := get_ReadlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { + *funcref = impl_Readlinkat + } else { + *funcref = error_Readlinkat + } + return (*funcref)(dirfd, path, buf) +} + +func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + n = -1 + err = ENOSYS + return +} + //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A +//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT //sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setegid(egid int) (err error) = SYS_SETEGID +//sys Seteuid(euid int) (err error) = SYS_SETEUID +//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A +//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -360,32 +976,57 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path 
string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { + if mode&O_ACCMODE == 0 { + mode |= O_RDONLY + } return open(path, mode, perm) } -func Mkfifoat(dirfd int, path string, mode uint32) (err error) { - wd, err := Getwd() - if err != nil { - return err +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) = SYS___OPENAT_A + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + if flags&O_ACCMODE == 0 { + flags |= O_RDONLY } + return openat(dirfd, path, flags, mode) +} - if err := Fchdir(dirfd); err != nil { - return err +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + if how.Flags&O_ACCMODE == 0 { + how.Flags |= O_RDONLY } - defer Chdir(wd) + return openat2(dirfd, path, how, SizeofOpenHow) +} - return Mkfifo(path, mode) +func ZosFdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + runtime.EnterSyscall() + ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) + runtime.ExitSyscall() + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) + return string(buffer[:zb]), nil + } + return "", errnoErr2(e1, e2) } //sys remove(path string) (err error) @@ -403,10 +1044,12 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) + runtime.ExitSyscall() n = clen(buf) + 1 - if e != 0 { - err = errnoErr(e) + if r0 == 0 { + err = errnoErr2(e1, e2) } return } @@ -520,9 +1163,41 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } +//sys waitid(idType int, id int, info *Siginfo, options int) (err error) + +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + return waitid(idType, id, info, options) +} + //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) + runtime.ExitSyscall() + wpid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) + +var Wait4 = enter_Wait4 + +func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + funcref := get_Wait4Addr() + if funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { + *funcref = impl_Wait4 + } else { + *funcref = legacyWait4 + } + return (*funcref)(pid, wstatus, options, rusage) +} + +func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // TODO(mundaym): z/OS 
doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. var status _C_int @@ -571,23 +1246,62 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } + p[0] = int(pp[0]) + p[1] = int(pp[1]) return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNano(path string, ts []Timespec) error { +//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A + +func validUtimensat() bool { + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { + return name == "__utimensat_a" + } + } + return false +} + +// Begin UtimesNano + +//go:nosplit +func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) + +var UtimesNano = enter_UtimesNano + +func enter_UtimesNano(path string, ts []Timespec) (err error) { + funcref := get_UtimesNanoAddr() + if validUtimensat() { + *funcref = utimesNanoImpl + } else { + *funcref = legacyUtimesNano + } + return (*funcref)(path, ts) +} + +func utimesNanoImpl(path string, ts []Timespec) (err error) { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func legacyUtimesNano(path string, ts []Timespec) (err error) { if len(ts) != 2 { return EINVAL } @@ -600,6 +1314,70 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } +// End UtimesNano + +// Begin UtimesNanoAt + +//go:nosplit +func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) + +var UtimesNanoAt = enter_UtimesNanoAt + +func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + funcref := get_UtimesNanoAtAddr() + if validUtimensat() { + *funcref = utimesNanoAtImpl + } else { + *funcref = legacyUtimesNanoAt + } + return (*funcref)(dirfd, path, ts, flags) +} + +func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + if path[0] != '/' { + dirPath, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + path = dirPath + "/" + path + } + if flags == AT_SYMLINK_NOFOLLOW { + if len(ts) != 2 { + return EINVAL + } + + if ts[0].Nsec >= 5e8 { + ts[0].Sec++ + } + ts[0].Nsec = 0 + if ts[1].Nsec >= 5e8 { + ts[1].Sec++ + } + ts[1].Nsec = 0 + + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := []Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return Lutimes(path, tv) + } + return UtimesNano(path, ts) +} + +// End UtimesNanoAt + func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1186,67 +1964,46 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) 
return n, nil } -func Opendir(name string) (uintptr, error) { - p, err := BytePtrFromString(name) - if err != nil { - return 0, err - } - dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) - runtime.KeepAlive(unsafe.Pointer(p)) - if e != 0 { - err = errnoErr(e) - } - return dir, err -} - -// clearsyscall.Errno resets the errno value to 0. -func clearErrno() - -func Readdir(dir uintptr) (*Dirent, error) { - var ent Dirent - var res uintptr - // __readdir_r_a returns errno at the end of the directory stream, rather than 0. - // Therefore to avoid false positives we clear errno before calling it. - - // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" - //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. - - e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) - var err error - if e != 0 { - err = errnoErr(Errno(e)) - } - if res == 0 { - return nil, err - } - return &ent, err -} - -func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - if int64(r0) == -1 { - err = errnoErr(Errno(e1)) +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err } - return + err = nil + runtime.EnterSyscall() + dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) + runtime.ExitSyscall() + runtime.KeepAlive(unsafe.Pointer(p)) + if dir == 0 { + err = errnoErr2(e1, e2) + } + return dir, err } +// clearsyscall.Errno resets the errno value to 0. +func clearErrno() + func Closedir(dir uintptr) error { - _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) - if e != 0 { - return errnoErr(e) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) + runtime.ExitSyscall() + if r0 != 0 { + return errnoErr2(e1, e2) } return nil } func Seekdir(dir uintptr, pos int) { - _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) + runtime.ExitSyscall() } func Telldir(dir uintptr) (int, error) { - p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) pos := int(p) - if pos == -1 { - return pos, errnoErr(e) + if int64(p) == -1 { + return pos, errnoErr2(e1, e2) } return pos, nil } @@ -1261,19 +2018,55 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.ExitSyscall() lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if errno == 0 { + if r0 == 0 { return nil } - return errno + return errnoErr2(e1, e2) +} + +func impl_Flock(fd int, how int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, 
uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlockAddr() *(func(fd int, how int) (err error)) + +var Flock = enter_Flock + +func validFlock(fp uintptr) bool { + if funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { + return name == "flock" + } + } + return false +} + +func enter_Flock(fd int, how int) (err error) { + funcref := get_FlockAddr() + if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { + *funcref = impl_Flock + } else { + *funcref = legacyFlock + } + return (*funcref)(fd, how) } -func Flock(fd int, how int) error { +func legacyFlock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -1307,41 +2100,51 @@ func Flock(fd int, how int) error { } func Mlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlock2(b []byte, flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlockall() (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -1372,15 +2175,104 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { - fd, err := open(path, O_RDONLY, 0) - defer Close(fd) - if err != nil { - return err +// Chtag + +//go:nosplit +func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) + +var Chtag = enter_Chtag + +func enter_Chtag(path string, ccsid uint64, textbit uint64) error { + funcref := get_ChtagAddr() + if validSetxattr() { + *funcref = impl_Chtag + } else { + *funcref = legacy_Chtag + } + return (*funcref)(path, ccsid, textbit) +} + +func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [8]byte + DecodeData(tag_buff[:], 8, tag) + return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) +} + +func impl_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [4]byte + DecodeData(tag_buff[:], 4, tag) + return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) +} + +// End of Chtag + +// Nanosleep + 
+//go:nosplit +func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) + +var Nanosleep = enter_Nanosleep + +func enter_Nanosleep(time *Timespec, leftover *Timespec) error { + funcref := get_NanosleepAddr() + if funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") == 0 { + *funcref = impl_Nanosleep + } else { + *funcref = legacyNanosleep + } + return (*funcref)(time, leftover) +} + +func impl_Nanosleep(time *Timespec, leftover *Timespec) error { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + runtime.ExitSyscall() + if int64(r0) == -1 { + return errnoErr2(e1, e2) + } + return nil +} + +func legacyNanosleep(time *Timespec, leftover *Timespec) error { + t0 := runtime.Nanotime1() + var secrem uint32 + var nsecrem uint32 + total := time.Sec*1000000000 + time.Nsec + elapsed := runtime.Nanotime1() - t0 + var rv int32 + var rc int32 + var err error + // repeatedly sleep for 1 second until less than 1 second left + for total-elapsed > 1000000000 { + rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) + if rv != 0 && rc != 112 { // 112 is EAGAIN + if leftover != nil && rc == 120 { // 120 is EINTR + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + err = Errno(rc) + return err + } + elapsed = runtime.Nanotime1() - t0 } - return Fstatfs(fd, stat) + // sleep the remainder + if total > elapsed { + rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) + } + if leftover != nil && rc == 120 { + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + if rv != 0 && rc != 112 { + err = Errno(rc) + } + return err } +// End of Nanosleep + var ( Stdin = 0 Stdout = 1 @@ -1395,6 +2287,9 @@ var ( errENOENT error = syscall.ENOENT ) +var ZosTraceLevel int +var ZosTracefile *os.File + var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -1416,6 +2311,56 @@ func errnoErr(e Errno) error { return e } +var reg *regexp.Regexp + +// enhanced with zos specific errno2 +func errnoErr2(e Errno, e2 uintptr) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + /* + Allow the retrieval of errno2 for EINVAL and ENOENT on zos + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + */ + } + if ZosTraceLevel > 0 { + var name string + if reg == nil { + reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") + } + i := 1 + pc, file, line, ok := runtime.Caller(i) + if ok { + name = runtime.FuncForPC(pc).Name() + } + for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { + i += 1 + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if ZosTracefile == nil { + ZosConsolePrintf("From %s:%d\n", file, line) + ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) + fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } + } else { + if ZosTracefile == nil { + ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) + } + } + } + return e +} + // ErrnoName returns the error name for error number e. 
func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -1474,6 +2419,9 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } + // Set __MAP_64 by default + flags |= __MAP_64 + // Map the requested memory. addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -1778,83 +2726,170 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { +func Getag(path string) (ccsid uint16, flag uint16, err error) { + var val [8]byte + sz, err := Getxattr(path, "ccsid", val[:]) + if err != nil { + return + } + ccsid = uint16(EncodeData(val[0:sz])) + sz, err = Getxattr(path, "flags", val[:]) + if err != nil { + return + } + flag = uint16(EncodeData(val[0:sz]) >> 15) + return +} + +// Mount begin +func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(data) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) + +var Mount = enter_Mount + +func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + funcref := get_MountAddr() + if validMount() { + *funcref = impl_Mount + } else { + *funcref = legacyMount + } + return (*funcref)(source, target, fstype, flags, data) +} + +func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[:8] + fstype = fstype[0:8] } else { - fstype += " "[:needspace] + fstype += " "[0:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func Unmount(name string, mtm int) (err error) { +func validMount() bool { + if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { + return name == "__mount1_a" + } + } + return false +} + +// Mount end + +// Unmount begin +func impl_Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnmountAddr() *(func(target string, flags int) (err error)) + +var Unmount = enter_Unmount + +func enter_Unmount(target string, flags int) (err error) { + funcref := get_UnmountAddr() + if funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { + *funcref = impl_Unmount + } else { + *funcref = legacyUnmount + } + return 
(*funcref)(target, flags) +} + +func legacyUnmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return unmount(name, mtm) + return unmount_LE(name, mtm) } // treat name as mountpoint b2s := func(arr []byte) string { - nulli := bytes.IndexByte(arr, 0) - if nulli == -1 { - return string(arr) - } else { - return string(arr[:nulli]) + var str string + for i := 0; i < len(arr); i++ { + if arr[i] == 0 { + str = string(arr[:i]) + break + } } + return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err != nil { - return err - } - if fsCount == 0 { - return EINVAL - } - for i := 0; i < fsCount; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break + fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err == nil { + err = EINVAL + for i := 0; i < fs_count; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } } + } else if fs_count == 0 { + err = EINVAL } return err } -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) +// Unmount end + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -1896,7 +2931,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := fdToPath(fd) + path, err := ZosFdToPath(fd) if err != nil { return 0, err } @@ -1910,7 +2945,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - e := readdir_r(d, &entryLE, &entrypLE) + e := Readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -1956,23 +2991,127 @@ 
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func ReadDirent(fd int, buf []byte) (n int, err error) { - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) +func Err2ad() (eadd *int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) + eadd = (*int)(unsafe.Pointer(r0)) + return } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +func ZosConsolePrintf(format string, v ...interface{}) (int, error) { + type __cmsg struct { + _ uint16 + _ [2]uint8 + __msg_length uint32 + __msg uintptr + _ [4]uint8 + } + msg := fmt.Sprintf(format, v...) + strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) + len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len + cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} + cmd := uint32(0) + runtime.EnterSyscall() + rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) + runtime.ExitSyscall() + if rc != 0 { + return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + return 0, nil +} +func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { + if nullterm { + ebcdicBytes = []byte(str + "\x00") + } else { + ebcdicBytes = []byte(str) + } + A2e(ebcdicBytes) + return +} +func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { + res := make([]byte, len(b)) + copy(res, b) + E2a(res) + if trimRight { + str = string(bytes.TrimRight(res, " \x00")) + } else { + str = string(res) + } + return } -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } } -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + 
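
A minimal caller-side sketch (not part of the vendored patch, and only meaningful on z/OS where this dispatch applies): after this change Mkfifoat is a package-level function variable rather than a plain function, but callers invoke it exactly as before; the first call probes the LE vector and installs either the direct __mkfifoat_a implementation or the path-concatenation fallback shown around this hunk. The file name used here is hypothetical.

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        // AT_FDCWD (now -100 in this patch) resolves the path relative to the
        // current working directory; mode is the usual permission bits.
        if err := unix.Mkfifoat(unix.AT_FDCWD, "queue.fifo", 0o600); err != nil {
            log.Fatalf("Mkfifoat: %v", err)
        }
    }
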
+//go:nosplit +func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkfifoat = enter_Mkfifoat + +func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkfifoatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, "") == 0 { + *funcref = impl_Mkfifoat + } else { + *funcref = legacy_Mkfifoat + } + return (*funcref)(dirfd, path, mode) +} + +func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + dirname, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + return Mkfifo(dirname+"/"+path, mode) } + +//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT +//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT +//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 79a84f18..672d6b0a 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux +//go:build (darwin && !ios) || linux || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 9eb0db66..8b7977a2 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && !ios +//go:build (darwin && !ios) || zos package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 36bf8399..877a62b4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -491,6 +491,7 @@ const ( BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 @@ -501,6 +502,7 @@ const ( BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -656,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -1338,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1626,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1652,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -1697,6 +1705,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 @@ -1898,6 +1907,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2166,7 +2176,7 @@ const ( 
NFT_SECMARK_CTX_MAXLEN = 0x100 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2302,6 +2312,7 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 PERF_BR_ARM64_DEBUG_DATA = 0x7 PERF_BR_ARM64_DEBUG_EXIT = 0x5 PERF_BR_ARM64_DEBUG_HALT = 0x4 @@ -2399,6 +2410,7 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e @@ -2892,8 +2904,9 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -2914,7 +2927,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3047,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3067,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3168,6 +3187,7 @@ const ( STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 + STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3255,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 @@ -3562,12 +3583,16 @@ const ( XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 + XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_TIMESTAMP = 0x1 + XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 XDP_USE_SG = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c..e4bc0bd5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca43600..689317af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d1..14270508 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -87,6 +87,7 @@ 
const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index 4dfd2e05..da08b2ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -10,41 +10,99 @@ package unix const ( - BRKINT = 0x0001 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" + BRKINT = 0x0001 + CLOCAL = 0x1 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLONE_NEWIPC = 0x08000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x00020000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUTS = 0x04000000 + CLONE_PARENT = 0x00008000 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + EFD_SEMAPHORE = 0x00002000 + EFD_CLOEXEC = 0x00001000 + EFD_NONBLOCK = 0x00000004 + EPOLL_CLOEXEC = 0x00001000 + EPOLL_CTL_ADD = 0 + EPOLL_CTL_MOD = 1 + EPOLL_CTL_DEL = 2 + EPOLLRDNORM = 0x0001 + EPOLLRDBAND = 0x0002 + EPOLLIN = 0x0003 + EPOLLOUT = 0x0004 + EPOLLWRBAND = 0x0008 + EPOLLPRI = 0x0010 + EPOLLERR = 0x0020 + EPOLLHUP = 0x0040 + EPOLLEXCLUSIVE = 0x20000000 + EPOLLONESHOT = 0x40000000 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FD_SETSIZE = 0x800 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + GRND_NONBLOCK = 1 + GRND_RANDOM = 2 + HUPCL = 0x0100 // Hang up on last close + IN_CLOEXEC = 0x00001000 + IN_NONBLOCK = 0x00000004 + IN_ACCESS = 0x00000001 + IN_MODIFY = 0x00000002 + IN_ATTRIB = 0x00000004 + IN_CLOSE_WRITE = 0x00000008 + IN_CLOSE_NOWRITE = 0x00000010 + IN_OPEN = 0x00000020 + IN_MOVED_FROM = 0x00000040 + IN_MOVED_TO = 0x00000080 + IN_CREATE = 0x00000100 + IN_DELETE = 0x00000200 + IN_DELETE_SELF = 0x00000400 + IN_MOVE_SELF = 0x00000800 + IN_UNMOUNT = 0x00002000 + IN_Q_OVERFLOW = 0x00004000 + IN_IGNORED = 0x00008000 + IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) + IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | + IN_CLOSE | IN_OPEN | IN_MOVE | + IN_CREATE | IN_DELETE | IN_DELETE_SELF | + IN_MOVE_SELF) + IN_ONLYDIR = 0x01000000 + IN_DONT_FOLLOW = 0x02000000 + IN_EXCL_UNLINK = 0x04000000 + IN_MASK_CREATE = 0x10000000 + IN_MASK_ADD = 0x20000000 + IN_ISDIR = 0x40000000 + IN_ONESHOT = 0x80000000 IP6F_MORE_FRAG = 0x0001 IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -152,10 +210,18 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 
3 + IP_TTL = 14 IP_UNBLOCK_SOURCE = 11 + ICMP6_FILTER = 1 + MCAST_INCLUDE = 0 + MCAST_EXCLUDE = 1 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 - ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -165,10 +231,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 // Not exist on zOS - LOCK_EX = 0x2 // Not exist on zOS - LOCK_NB = 0x4 // Not exist on zOS - LOCK_UN = 0x8 // Not exist on zOS + LOCK_SH = 0x1 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_UN = 0x8 POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -182,15 +248,29 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 45 + __MAP_MEGA = 0x8 + __MAP_64 = 0x10 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings + MS_BIND = 0x00001000 + MS_MOVE = 0x00002000 + MS_NOSUID = 0x00000002 + MS_PRIVATE = 0x00040000 + MS_REC = 0x00004000 + MS_REMOUNT = 0x00008000 + MS_RDONLY = 0x00000001 + MS_UNBINDABLE = 0x00020000 + MNT_DETACH = 0x00000004 + ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS + NFS_SUPER_MAGIC = 0x6969 // NFS + NSFS_MAGIC = 0x6e736673 // PROCNS + PROC_SUPER_MAGIC = 0x9fa0 // proc FS + ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS + ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS + ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -205,13 +285,20 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 + ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 + O_DIRECT = 0x00002000 + O_NOFOLLOW = 0x00004000 + O_DIRECTORY = 0x00008000 + O_PATH = 0x00080000 + O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 + O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -248,6 +335,7 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -285,15 +373,33 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 + RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 + SCHED_FIFO = 0x2 + SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 + SHM_RND = 0x2 + SHM_RDONLY = 0x1 + SHMLBA = 0x1000 + IPC_STAT = 0x3 + IPC_SET = 0x2 + IPC_RMID = 0x1 + IPC_PRIVATE = 0x0 + IPC_CREAT = 0x1000000 + __IPC_MEGA = 0x4000000 + __IPC_SHAREAS = 0x20000000 + __IPC_BELOWBAR = 0x10000000 + IPC_EXCL = 0x2000000 + __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 + SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 + SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -378,8 +484,6 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 - TCP_INFO = 0xb - TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -427,7 +531,10 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 + WEXITED = 0x8 WNOHANG = 0x1 + WNOWAIT = 0x20 + WSTOPPED = 0x10 WUNTRACED = 0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -452,8 +559,28 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 
16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics - AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics - AT_FDCWD = 2 // for Unix compatibility -- no zos semantics + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + P_PID = 0 + P_PGID = 1 + P_ALL = 2 + PR_SET_NAME = 15 + PR_GET_NAME = 16 + PR_SET_NO_NEW_PRIVS = 38 + PR_GET_NO_NEW_PRIVS = 39 + PR_SET_DUMPABLE = 4 + PR_GET_DUMPABLE = 3 + PR_SET_PDEATHSIG = 1 + PR_GET_PDEATHSIG = 2 + PR_SET_CHILD_SUBREAPER = 36 + PR_GET_CHILD_SUBREAPER = 37 + AT_FDCWD = -100 + AT_EACCESS = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_REMOVEDIR = 0x200 + RENAME_NOREPLACE = 1 << 0 ) const ( @@ -476,6 +603,7 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) + ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -700,7 +828,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "", ""}, + {148, "EDC5148I", "The named attribute or data not available."}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -743,6 +871,7 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s new file mode 100644 index 00000000..b77ff5db --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s @@ -0,0 +1,364 @@ +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
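
For orientation (a sketch, not part of the generated file): each TEXT stanza below loads the address of a package-level function variable (for example ·Flistxattr) into R8 and returns it, which gives the otherwise body-less get_*Addr declarations in zsyscall_zos_s390x.go something to resolve to. The enter_* wrappers then overwrite that variable on first call, as in the pairing below, which uses only names that appear later in this patch.

    // Declaration whose body is the assembly stub in this file.
    //go:nosplit
    func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error))

    // First call probes the LE vector, then rebinds the exported variable so
    // subsequent calls skip the probe entirely.
    var Flistxattr = enter_Flistxattr

    func enter_Flistxattr(fd int, dest []byte) (sz int, err error) {
        funcref := get_FlistxattrAddr()
        if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 {
            *funcref = impl_Flistxattr // __flistxattr_a is available: call it directly
        } else {
            *funcref = error_Flistxattr // not available: report ENOSYS
        }
        return (*funcref)(fd, dest)
    }
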
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·accept4(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Removexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dup3(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dirfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCtl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollPwait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollWait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Eventfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Faccessat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchmodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchownat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fdatasync(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·fstatat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fstatfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimesat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getrandom(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyAddWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyRmWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Listxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Llistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lutimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Statfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Syncfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unshare(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Linkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkdirat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mknodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·PivotRoot(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT 
·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prctl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prlimit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Sethostname(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setns(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Symlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·utimensat(SB), R8 + MOVD R8, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1488d271..87d8612a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -906,6 +906,16 @@ func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fsconfig(fd int, cmd uint, key *byte, value *byte, aux int) (err error) { + _, _, e1 := Syscall6(SYS_FSCONFIG, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(value)), uintptr(aux), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 94f01123..7ccf66b7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build zos && s390x @@ -6,17 +6,100 @@ package unix import ( + "runtime" + "syscall" "unsafe" ) +var _ syscall.Errno + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() val = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) + +var Flistxattr = enter_Flistxattr + +func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { + funcref := get_FlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Flistxattr + } else { + *funcref = error_Flistxattr + } + return (*funcref)(fd, dest) +} + +func error_Flistxattr(fd int, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) + +var Fremovexattr = enter_Fremovexattr + +func enter_Fremovexattr(fd int, attr string) (err error) { + funcref := get_FremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Fremovexattr + } else { + *funcref = error_Fremovexattr } + return (*funcref)(fd, attr) +} + +func error_Fremovexattr(fd int, attr string) (err error) { + err = ENOSYS return } @@ -29,10 +112,12 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -46,31 +131,159 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
impl_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) + +var Fgetxattr = enter_Fgetxattr + +func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + funcref := get_FgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { + *funcref = impl_Fgetxattr + } else { + *funcref = error_Fgetxattr + } + return (*funcref)(fd, attr, dest) +} + +func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(data) > 0 { + _p1 = unsafe.Pointer(&data[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag int) (err error)) + +var Fsetxattr = enter_Fsetxattr + +func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + funcref := get_FsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { + *funcref = impl_Fsetxattr + } else { + *funcref = error_Fsetxattr } + return (*funcref)(fd, attr, data, flag) +} + +func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) + +var accept4 = enter_accept4 + +func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd 
int, err error) { + funcref := get_accept4Addr() + if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { + *funcref = impl_accept4 + } else { + *funcref = error_accept4 } + return (*funcref)(s, rsa, addrlen, flags) +} + +func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -78,9 +291,11 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -88,10 +303,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -99,9 +314,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -109,9 +324,11 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -119,9 +336,11 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -129,10 +348,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -140,9 +359,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -150,9 +369,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -160,10 +379,52 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RemovexattrAddr() *(func(path string, attr string) (err error)) + +var Removexattr = enter_Removexattr + +func enter_Removexattr(path string, attr string) (err error) { + funcref := get_RemovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Removexattr + } else { + *funcref = error_Removexattr 
} + return (*funcref)(path, attr) +} + +func error_Removexattr(path string, attr string) (err error) { + err = ENOSYS return } @@ -176,10 +437,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -193,9 +456,11 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -203,10 +468,12 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -214,10 +481,12 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -225,10 +494,12 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.ExitSyscall() ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -236,9 +507,11 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := 
syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -246,9 +519,11 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req int, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -256,9 +531,62 @@ func ioctl(fd int, req int, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, uintptr(id), uintptr(addr), uintptr(flag)) + runtime.ExitSyscall() + ret = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + result = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) + runtime.ExitSyscall() + id = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -271,9 +599,11 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -286,9 +616,11 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -301,9 +633,11 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -316,9 +650,11 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -331,10 +667,12 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -342,10 +680,12 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -353,617 +693,2216 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Errno2() (er2 int) { - uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) - er2 = int(uer2) +func impl_Dup3(oldfd int, newfd int, flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) -func Err2ad() (eadd *int) { - ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) - eadd = (*int)(unsafe.Pointer(ueadd)) - return -} +var Dup3 = enter_Dup3 -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func enter_Dup3(oldfd int, newfd int, flags int) (err error) { + funcref := get_Dup3Addr() + if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { + *funcref = impl_Dup3 + } else { + *funcref 
= error_Dup3 + } + return (*funcref)(oldfd, newfd, flags) +} -func Exit(code int) { - syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) +func error_Dup3(oldfd int, newfd int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Dirfd(dirp uintptr) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Dirfd = enter_Dirfd + +func enter_Dirfd(dirp uintptr) (fd int, err error) { + funcref := get_DirfdAddr() + if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { + *funcref = impl_Dirfd + } else { + *funcref = error_Dirfd } + return (*funcref)(dirp) +} + +func error_Dirfd(dirp uintptr) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate(size int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreateAddr() *(func(size int) (fd int, err error)) -func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - retval = int(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate = enter_EpollCreate + +func enter_EpollCreate(size int) (fd int, err error) { + funcref := get_EpollCreateAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { + *funcref = impl_EpollCreate + } else { + *funcref = error_EpollCreate } + return (*funcref)(size) +} + +func error_EpollCreate(size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_LE_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) -func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate1 = enter_EpollCreate1 + +func enter_EpollCreate1(flags int) (fd int, err error) { + funcref := get_EpollCreate1Addr() + if 
funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { + *funcref = impl_EpollCreate1 + } else { + *funcref = error_EpollCreate1 } + return (*funcref)(flags) +} + +func error_EpollCreate1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCtl = enter_EpollCtl + +func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + funcref := get_EpollCtlAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { + *funcref = impl_EpollCtl + } else { + *funcref = error_EpollCtl } - return + return (*funcref)(epfd, op, fd, event) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpagesize() (pgsize int) { - r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) - pgsize = int(r0) +func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mprotect(b []byte, prot int) (err error) { +func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) +var EpollPwait = enter_EpollPwait + +func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + funcref := get_EpollPwaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { + *funcref = impl_EpollPwait } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) + *funcref = error_EpollPwait } + return (*funcref)(epfd, events, msec, sigmask) +} + +func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
Poll(fds []PollFd, timeout int) (n int, err error) { +func impl_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer - if len(fds) > 0 { - _p0 = unsafe.Pointer(&fds[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollWaitAddr() *(func(epfd int, events []EpollEvent, msec int) (n int, err error)) -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollWait = enter_EpollWait + +func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + funcref := get_EpollWaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { + *funcref = impl_EpollWait + } else { + *funcref = error_EpollWait } + return (*funcref)(epfd, events, msec) +} + +func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Errno2() (er2 int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) + runtime.ExitSyscall() + er2 = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Eventfd(initval uint, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) + +var Eventfd = enter_Eventfd + +func enter_Eventfd(initval uint, flags int) (fd int, err error) { + funcref := get_EventfdAddr() + if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, "") == 0 { + *funcref = impl_Eventfd + } else { + *funcref = error_Eventfd + } + return (*funcref)(initval, flags) +} + +func error_Eventfd(initval uint, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { +func Exit(code int) { + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) + runtime.ExitSyscall() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(filesystem) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - var _p3 *byte - _p3, err = BytePtrFromString(parm) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) -func unmount(filesystem string, mtm int) (err error) { +var Faccessat = enter_Faccessat + +func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FaccessatAddr() + if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { + *funcref = impl_Faccessat + } else { + *funcref = error_Faccessat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(filesystem) + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchmodatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) + +var Fchmodat = enter_Fchmodat + +func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FchmodatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { + *funcref = impl_Fchmodat + } else { + *funcref = error_Fchmodat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), 
uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chroot(path string) (err error) { +func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) + +var Fchownat = enter_Fchownat + +func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + funcref := get_FchownatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { + *funcref = impl_Fchownat + } else { + *funcref = error_Fchownat } + return (*funcref)(fd, path, uid, gid, flags) +} + +func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Uname(buf *Utsname) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() + retval = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gethostname(buf []byte) (err error) { +func impl_Fdatasync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FdatasyncAddr() *(func(fd int) (err error)) + +var Fdatasync = enter_Fdatasync + +func enter_Fdatasync(fd int) (err error) { + funcref := get_FdatasyncAddr() + if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { + *funcref = impl_Fdatasync + } else { + *funcref = error_Fdatasync + } + return (*funcref)(fd) +} + +func error_Fdatasync(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) 
(err error)) + +var fstatat = enter_fstatat + +func enter_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + funcref := get_fstatatAddr() + if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { + *funcref = impl_fstatat + } else { + *funcref = error_fstatat + } + return (*funcref)(dirfd, path, stat, flags) +} + +func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) + +var Lgetxattr = enter_Lgetxattr + +func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + funcref := get_LgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { + *funcref = impl_Lgetxattr + } else { + *funcref = error_Lgetxattr + } + return (*funcref)(link, attr, dest) +} + +func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Lsetxattr = enter_Lsetxattr + +func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_LsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { + *funcref = impl_Lsetxattr + } else { + *funcref = error_Lsetxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) + +var Fstatfs = enter_Fstatfs + +func 
enter_Fstatfs(fd int, buf *Statfs_t) (err error) { + funcref := get_FstatfsAddr() + if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { + *funcref = impl_Fstatfs + } else { + *funcref = error_Fstatfs + } + return (*funcref)(fd, buf) +} + +func error_Fstatfs(fd int, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Futimes(fd int, tv []Timeval) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(tv) > 0 { + _p0 = unsafe.Pointer(&tv[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) -func Getegid() (egid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) +var Futimes = enter_Futimes + +func enter_Futimes(fd int, tv []Timeval) (err error) { + funcref := get_FutimesAddr() + if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { + *funcref = impl_Futimes + } else { + *funcref = error_Futimes + } + return (*funcref)(fd, tv) +} + +func error_Futimes(fd int, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Geteuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) +func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) + +var Futimesat = enter_Futimesat + +func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + funcref := get_FutimesatAddr() + if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { + *funcref = impl_Futimesat + } else { + *funcref = error_Futimesat + } + return (*funcref)(dirfd, path, tv) +} + +func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getgid() (gid int) { - r0, _, _ := 
syscall_rawsyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) +func Ftruncate(fd int, length int64) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) +func impl_Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) + +var Getrandom = enter_Getrandom + +func enter_Getrandom(buf []byte, flags int) (n int, err error) { + funcref := get_GetrandomAddr() + if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { + *funcref = impl_Getrandom + } else { + *funcref = error_Getrandom + } + return (*funcref)(buf, flags) +} + +func error_Getrandom(buf []byte, flags int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_InotifyInit() (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInitAddr() *(func() (fd int, err error)) + +var InotifyInit = enter_InotifyInit + +func enter_InotifyInit() (fd int, err error) { + funcref := get_InotifyInitAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { + *funcref = impl_InotifyInit + } else { + *funcref = error_InotifyInit } + return (*funcref)() +} + +func error_InotifyInit() (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getppid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) - pid = int(r0) +func impl_InotifyInit1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) + +var InotifyInit1 = enter_InotifyInit1 + +func enter_InotifyInit1(flags int) (fd int, err error) { + funcref := get_InotifyInit1Addr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { + *funcref = impl_InotifyInit1 + } else { + *funcref = error_InotifyInit1 + } + return (*funcref)(flags) +} + +func error_InotifyInit1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + runtime.ExitSyscall() + watchdesc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) + +var InotifyAddWatch = enter_InotifyAddWatch + +func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + funcref := get_InotifyAddWatchAddr() + if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { + *funcref = impl_InotifyAddWatch + } else { + *funcref = error_InotifyAddWatch + } + return (*funcref)(fd, pathname, mask) +} + +func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + watchdesc = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) + runtime.ExitSyscall() + success = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) + +var InotifyRmWatch = enter_InotifyRmWatch + +func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + funcref := get_InotifyRmWatchAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { + *funcref = impl_InotifyRmWatch + } else { + *funcref = error_InotifyRmWatch + } + return (*funcref)(fd, watchdesc) +} + +func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + success = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Listxattr = enter_Listxattr + +func enter_Listxattr(path string, dest []byte) (sz int, err error) { + funcref := get_ListxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { + *funcref = impl_Listxattr + } else { + *funcref = error_Listxattr + } + return (*funcref)(path, dest) +} + +func error_Listxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + 
err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Llistxattr = enter_Llistxattr + +func enter_Llistxattr(path string, dest []byte) (sz int, err error) { + funcref := get_LlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Llistxattr + } else { + *funcref = error_Llistxattr + } + return (*funcref)(path, dest) +} + +func error_Llistxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LremovexattrAddr() *(func(path string, attr string) (err error)) + +var Lremovexattr = enter_Lremovexattr + +func enter_Lremovexattr(path string, attr string) (err error) { + funcref := get_LremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Lremovexattr + } else { + *funcref = error_Lremovexattr + } + return (*funcref)(path, attr) +} + +func error_Lremovexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lutimes(path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) + +var Lutimes = enter_Lutimes + +func enter_Lutimes(path string, tv []Timeval) (err error) { + funcref := get_LutimesAddr() + if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { + *funcref = impl_Lutimes + } else { + *funcref = error_Lutimes + } + return (*funcref)(path, tv) +} + +func error_Lutimes(path string, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { 
+ err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) + +var Statfs = enter_Statfs + +func enter_Statfs(path string, buf *Statfs_t) (err error) { + funcref := get_StatfsAddr() + if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { + *funcref = impl_Statfs + } else { + *funcref = error_Statfs + } + return (*funcref)(path, buf) +} + +func error_Statfs(path string, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Syncfs(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SyncfsAddr() *(func(fd int) (err error)) + +var Syncfs = enter_Syncfs + +func enter_Syncfs(fd int) (err error) { + funcref := get_SyncfsAddr() + if funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { + *funcref = impl_Syncfs + } else { + *funcref = error_Syncfs + } + return (*funcref)(fd) +} + +func error_Syncfs(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) + runtime.ExitSyscall() + ticks = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount_LE(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + runtime.ExitSyscall() + ret = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unshare(flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnshareAddr() *(func(flags int) (err error)) + +var Unshare = enter_Unshare + +func enter_Unshare(flags int) (err error) { + 
funcref := get_UnshareAddr() + if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { + *funcref = impl_Unshare + } else { + *funcref = error_Unshare + } + return (*funcref)(flags) +} + +func error_Unshare(flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) + pgid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) + runtime.ExitSyscall() + prio = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) + runtime.ExitSyscall() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) + runtime.ExitSyscall() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) + sid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func Kill(pid int, sig Signal) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) + +var Linkat = enter_Linkat + +func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + funcref := get_LinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { + *funcref = impl_Linkat + } else { + *funcref = error_Linkat + } + return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) +} + +func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), 
uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkdirat = enter_Mkdirat + +func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkdiratAddr() + if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { + *funcref = impl_Mkdirat + } else { + *funcref = error_Mkdirat + } + return (*funcref)(dirfd, path, mode) +} + +func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) + +var Mknodat = enter_Mknodat + +func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + funcref := get_MknodatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { + *funcref = impl_Mknodat + } else { + *funcref = error_Mknodat + } + return (*funcref)(dirfd, path, mode, dev) +} + +func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_PivotRoot(newroot string, oldroot string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(oldroot) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + 
+//go:nosplit +func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) + +var PivotRoot = enter_PivotRoot + +func enter_PivotRoot(newroot string, oldroot string) (err error) { + funcref := get_PivotRootAddr() + if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { + *funcref = impl_PivotRoot + } else { + *funcref = error_PivotRoot + } + return (*funcref)(newroot, oldroot) +} + +func error_PivotRoot(newroot string, oldroot string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) -func getrusage(who int, rusage *rusage_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prctl = enter_Prctl + +func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + funcref := get_PrctlAddr() + if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { + *funcref = impl_Prctl + } else { + *funcref = error_Prctl } - return + return (*funcref)(option, arg2, arg3, arg4, arg5) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) - uid = 
int(r0) +func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) -func Kill(pid int, sig Signal) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prlimit = enter_Prlimit + +func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + funcref := get_PrlimitAddr() + if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { + *funcref = impl_Prlimit + } else { + *funcref = error_Prlimit } + return (*funcref)(pid, resource, newlimit, old) +} + +func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { +func Rename(from string, to string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Link(path string, link string) (err error) { +func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) if err != nil { return } var _p1 *byte - _p1, err = BytePtrFromString(link) + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_RenameatAddr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) -func Listen(s int, n int) (err error) { - _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat = enter_Renameat + +func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + funcref := get_RenameatAddr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { + *funcref = impl_Renameat + } else { + *funcref = error_Renameat } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath) +} + +func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_LE_t) (err error) { +func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat2 = enter_Renameat2 + +func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + funcref := get_Renameat2Addr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { + *funcref = impl_Renameat2 + } else { + *funcref = error_Renameat2 } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) +func Seek(fd int, offset int64, whence int) (off int64, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.ExitSyscall() + off = int64(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Setegid(egid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), 
uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func impl_Sethostname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SethostnameAddr() *(func(p []byte) (err error)) -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) +var Sethostname = enter_Sethostname + +func enter_Sethostname(p []byte) (err error) { + funcref := get_SethostnameAddr() + if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { + *funcref = impl_Sethostname } else { - _p1 = unsafe.Pointer(&_zero) + *funcref = error_Sethostname } - r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return + return (*funcref)(p) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Sethostname(p []byte) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Setns(fd int, nstype int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SetnsAddr() *(func(fd int, nstype int) (err error)) -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) +var Setns = enter_Setns + +func enter_Setns(fd int, nstype int) (err error) { + funcref := get_SetnsAddr() + if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { + *funcref = impl_Setns + } else { + *funcref 
= error_Setns } + return (*funcref)(fd, nstype) +} + +func error_Setns(fd int, nstype int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -971,9 +2910,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -981,9 +2920,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -991,9 +2930,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1001,9 +2940,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1011,10 +2950,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1022,9 +2961,11 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1032,9 +2973,11 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) 
- if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1042,9 +2985,11 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1057,9 +3002,11 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1077,17 +3024,63 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) + +var Symlinkat = enter_Symlinkat + +func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + funcref := get_SymlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { + *funcref = impl_Symlinkat + } else { + *funcref = error_Symlinkat + } + return (*funcref)(oldPath, dirfd, newPath) +} + +func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - syscall_syscall(SYS_SYNC, 0, 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) + runtime.ExitSyscall() return } @@ -1099,9 +3092,11 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1109,9 +3104,11 @@ func Truncate(path string, length 
int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1119,9 +3116,11 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1129,7 +3128,9 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) + runtime.ExitSyscall() oldmask = int(r0) return } @@ -1142,10 +3143,49 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) + +var Unlinkat = enter_Unlinkat + +func enter_Unlinkat(dirfd int, path string, flags int) (err error) { + funcref := get_UnlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { + *funcref = impl_Unlinkat + } else { + *funcref = error_Unlinkat } + return (*funcref)(dirfd, path, flags) +} + +func error_Unlinkat(dirfd int, path string, flags int) (err error) { + err = ENOSYS return } @@ -1157,9 +3197,11 @@ func Utime(path string, utim *Utimbuf) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1172,11 +3214,91 @@ func open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___OPEN_A, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) + +var openat = enter_openat + +func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + funcref := get_openatAddr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { + *funcref = impl_openat + } else { + *funcref = error_openat + } + return (*funcref)(dirfd, path, flags, mode) +} + +func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) + +var openat2 = enter_openat2 + +func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + funcref := get_openat2Addr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { + *funcref = impl_openat2 + } else { + *funcref = error_openat2 } + return (*funcref)(dirfd, path, open_how, size) +} + +func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } @@ -1188,9 +3310,23 @@ func remove(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitid(idType int, id int, info *Siginfo, options int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1198,10 +3334,12 @@ func remove(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) 
(wpid int, err error) { - r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.ExitSyscall() wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1209,9 +3347,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1219,9 +3357,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1234,20 +3372,87 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_utimensatAddr() *(func(dirfd int, path string, ts *[2]Timespec, flags int) (err error)) + +var utimensat = enter_utimensat + +func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + funcref := get_utimensatAddr() + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + *funcref = impl_utimensat + } else { + *funcref = error_utimensat + } + return (*funcref)(dirfd, path, ts, flags) +} + +func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Posix_openpt(oflag int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) + runtime.ExitSyscall() + fd = int(r0) + if 
int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Grantpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlockpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 0cc3ce49..53aef5dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -452,4 +452,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 856d92d6..71d52476 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -374,4 +374,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 8d467094..c7477061 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -416,4 +416,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index edc17324..f96e214f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -319,4 +319,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 445eba20..28425346 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -313,4 +313,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index adba01bc..d0953018 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + 
SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 014c4e9c..295c7f4b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index ccc97d74..d1a9eaca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ec2b64a9..bec157c3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 21a839e3..7ee7bdc4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -443,4 +443,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index c11121ec..fad1f25b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 909b631f..7d3e1635 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index e49bed16..0ed53ad9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -320,4 +320,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + 
+    SYS_LISTMOUNT = 458
+    SYS_LSM_GET_SELF_ATTR = 459
+    SYS_LSM_SET_SELF_ATTR = 460
+    SYS_LSM_LIST_MODULES = 461
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 66017d2d..2fba04ad 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -381,4 +381,9 @@ const (
     SYS_FUTEX_WAKE = 454
     SYS_FUTEX_WAIT = 455
     SYS_FUTEX_REQUEUE = 456
+    SYS_STATMOUNT = 457
+    SYS_LISTMOUNT = 458
+    SYS_LSM_GET_SELF_ATTR = 459
+    SYS_LSM_SET_SELF_ATTR = 460
+    SYS_LSM_LIST_MODULES = 461
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 47bab18d..621d00d7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -394,4 +394,9 @@ const (
     SYS_FUTEX_WAKE = 454
     SYS_FUTEX_WAIT = 455
     SYS_FUTEX_REQUEUE = 456
+    SYS_STATMOUNT = 457
+    SYS_LISTMOUNT = 458
+    SYS_LSM_GET_SELF_ATTR = 459
+    SYS_LSM_SET_SELF_ATTR = 460
+    SYS_LSM_LIST_MODULES = 461
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
index b2e30858..5e8c263c 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go
@@ -1,2669 +1,2852 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s
+// Code generated by the command above; see README.md. DO NOT EDIT.
 
 //go:build zos && s390x
 
 package unix
 
-// TODO: auto-generate.
- const ( - SYS_ACOSD128 = 0xB80 - SYS_ACOSD32 = 0xB7E - SYS_ACOSD64 = 0xB7F - SYS_ACOSHD128 = 0xB83 - SYS_ACOSHD32 = 0xB81 - SYS_ACOSHD64 = 0xB82 - SYS_AIO_FSYNC = 0xC69 - SYS_ASCTIME = 0x0AE - SYS_ASCTIME64 = 0xCD7 - SYS_ASCTIME64_R = 0xCD8 - SYS_ASIND128 = 0xB86 - SYS_ASIND32 = 0xB84 - SYS_ASIND64 = 0xB85 - SYS_ASINHD128 = 0xB89 - SYS_ASINHD32 = 0xB87 - SYS_ASINHD64 = 0xB88 - SYS_ATAN2D128 = 0xB8F - SYS_ATAN2D32 = 0xB8D - SYS_ATAN2D64 = 0xB8E - SYS_ATAND128 = 0xB8C - SYS_ATAND32 = 0xB8A - SYS_ATAND64 = 0xB8B - SYS_ATANHD128 = 0xB92 - SYS_ATANHD32 = 0xB90 - SYS_ATANHD64 = 0xB91 - SYS_BIND2ADDRSEL = 0xD59 - SYS_C16RTOMB = 0xD40 - SYS_C32RTOMB = 0xD41 - SYS_CBRTD128 = 0xB95 - SYS_CBRTD32 = 0xB93 - SYS_CBRTD64 = 0xB94 - SYS_CEILD128 = 0xB98 - SYS_CEILD32 = 0xB96 - SYS_CEILD64 = 0xB97 - SYS_CLEARENV = 0x0C9 - SYS_CLEARERR_UNLOCKED = 0xCA1 - SYS_CLOCK = 0x0AA - SYS_CLOGL = 0xA00 - SYS_CLRMEMF = 0x0BD - SYS_CONJ = 0xA03 - SYS_CONJF = 0xA06 - SYS_CONJL = 0xA09 - SYS_COPYSIGND128 = 0xB9E - SYS_COPYSIGND32 = 0xB9C - SYS_COPYSIGND64 = 0xB9D - SYS_COSD128 = 0xBA1 - SYS_COSD32 = 0xB9F - SYS_COSD64 = 0xBA0 - SYS_COSHD128 = 0xBA4 - SYS_COSHD32 = 0xBA2 - SYS_COSHD64 = 0xBA3 - SYS_CPOW = 0xA0C - SYS_CPOWF = 0xA0F - SYS_CPOWL = 0xA12 - SYS_CPROJ = 0xA15 - SYS_CPROJF = 0xA18 - SYS_CPROJL = 0xA1B - SYS_CREAL = 0xA1E - SYS_CREALF = 0xA21 - SYS_CREALL = 0xA24 - SYS_CSIN = 0xA27 - SYS_CSINF = 0xA2A - SYS_CSINH = 0xA30 - SYS_CSINHF = 0xA33 - SYS_CSINHL = 0xA36 - SYS_CSINL = 0xA2D - SYS_CSNAP = 0x0C5 - SYS_CSQRT = 0xA39 - SYS_CSQRTF = 0xA3C - SYS_CSQRTL = 0xA3F - SYS_CTAN = 0xA42 - SYS_CTANF = 0xA45 - SYS_CTANH = 0xA4B - SYS_CTANHF = 0xA4E - SYS_CTANHL = 0xA51 - SYS_CTANL = 0xA48 - SYS_CTIME = 0x0AB - SYS_CTIME64 = 0xCD9 - SYS_CTIME64_R = 0xCDA - SYS_CTRACE = 0x0C6 - SYS_DIFFTIME = 0x0A7 - SYS_DIFFTIME64 = 0xCDB - SYS_DLADDR = 0xC82 - SYS_DYNALLOC = 0x0C3 - SYS_DYNFREE = 0x0C2 - SYS_ERFCD128 = 0xBAA - SYS_ERFCD32 = 0xBA8 - SYS_ERFCD64 = 0xBA9 - SYS_ERFD128 = 0xBA7 - SYS_ERFD32 = 0xBA5 - SYS_ERFD64 = 0xBA6 - SYS_EXP2D128 = 0xBB0 - SYS_EXP2D32 = 0xBAE - SYS_EXP2D64 = 0xBAF - SYS_EXPD128 = 0xBAD - SYS_EXPD32 = 0xBAB - SYS_EXPD64 = 0xBAC - SYS_EXPM1D128 = 0xBB3 - SYS_EXPM1D32 = 0xBB1 - SYS_EXPM1D64 = 0xBB2 - SYS_FABSD128 = 0xBB6 - SYS_FABSD32 = 0xBB4 - SYS_FABSD64 = 0xBB5 - SYS_FDELREC_UNLOCKED = 0xCA2 - SYS_FDIMD128 = 0xBB9 - SYS_FDIMD32 = 0xBB7 - SYS_FDIMD64 = 0xBB8 - SYS_FDOPEN_UNLOCKED = 0xCFC - SYS_FECLEAREXCEPT = 0xAEA - SYS_FEGETENV = 0xAEB - SYS_FEGETEXCEPTFLAG = 0xAEC - SYS_FEGETROUND = 0xAED - SYS_FEHOLDEXCEPT = 0xAEE - SYS_FEOF_UNLOCKED = 0xCA3 - SYS_FERAISEEXCEPT = 0xAEF - SYS_FERROR_UNLOCKED = 0xCA4 - SYS_FESETENV = 0xAF0 - SYS_FESETEXCEPTFLAG = 0xAF1 - SYS_FESETROUND = 0xAF2 - SYS_FETCHEP = 0x0BF - SYS_FETESTEXCEPT = 0xAF3 - SYS_FEUPDATEENV = 0xAF4 - SYS_FE_DEC_GETROUND = 0xBBA - SYS_FE_DEC_SETROUND = 0xBBB - SYS_FFLUSH_UNLOCKED = 0xCA5 - SYS_FGETC_UNLOCKED = 0xC80 - SYS_FGETPOS64 = 0xCEE - SYS_FGETPOS64_UNLOCKED = 0xCF4 - SYS_FGETPOS_UNLOCKED = 0xCA6 - SYS_FGETS_UNLOCKED = 0xC7C - SYS_FGETWC_UNLOCKED = 0xCA7 - SYS_FGETWS_UNLOCKED = 0xCA8 - SYS_FILENO_UNLOCKED = 0xCA9 - SYS_FLDATA = 0x0C1 - SYS_FLDATA_UNLOCKED = 0xCAA - SYS_FLOCATE_UNLOCKED = 0xCAB - SYS_FLOORD128 = 0xBBE - SYS_FLOORD32 = 0xBBC - SYS_FLOORD64 = 0xBBD - SYS_FMA = 0xA63 - SYS_FMAD128 = 0xBC1 - SYS_FMAD32 = 0xBBF - SYS_FMAD64 = 0xBC0 - SYS_FMAF = 0xA66 - SYS_FMAL = 0xA69 - SYS_FMAX = 0xA6C - SYS_FMAXD128 = 0xBC4 - SYS_FMAXD32 = 0xBC2 - SYS_FMAXD64 = 0xBC3 - SYS_FMAXF = 0xA6F - SYS_FMAXL = 0xA72 - SYS_FMIN = 0xA75 - SYS_FMIND128 = 0xBC7 - 
SYS_FMIND32 = 0xBC5 - SYS_FMIND64 = 0xBC6 - SYS_FMINF = 0xA78 - SYS_FMINL = 0xA7B - SYS_FMODD128 = 0xBCA - SYS_FMODD32 = 0xBC8 - SYS_FMODD64 = 0xBC9 - SYS_FOPEN64 = 0xD49 - SYS_FOPEN64_UNLOCKED = 0xD4A - SYS_FOPEN_UNLOCKED = 0xCFA - SYS_FPRINTF_UNLOCKED = 0xCAC - SYS_FPUTC_UNLOCKED = 0xC81 - SYS_FPUTS_UNLOCKED = 0xC7E - SYS_FPUTWC_UNLOCKED = 0xCAD - SYS_FPUTWS_UNLOCKED = 0xCAE - SYS_FREAD_NOUPDATE = 0xCEC - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED - SYS_FREAD_UNLOCKED = 0xC7B - SYS_FREEIFADDRS = 0xCE6 - SYS_FREOPEN64 = 0xD4B - SYS_FREOPEN64_UNLOCKED = 0xD4C - SYS_FREOPEN_UNLOCKED = 0xCFB - SYS_FREXPD128 = 0xBCE - SYS_FREXPD32 = 0xBCC - SYS_FREXPD64 = 0xBCD - SYS_FSCANF_UNLOCKED = 0xCAF - SYS_FSEEK64 = 0xCEF - SYS_FSEEK64_UNLOCKED = 0xCF5 - SYS_FSEEKO64 = 0xCF0 - SYS_FSEEKO64_UNLOCKED = 0xCF6 - SYS_FSEEKO_UNLOCKED = 0xCB1 - SYS_FSEEK_UNLOCKED = 0xCB0 - SYS_FSETPOS64 = 0xCF1 - SYS_FSETPOS64_UNLOCKED = 0xCF7 - SYS_FSETPOS_UNLOCKED = 0xCB3 - SYS_FTELL64 = 0xCF2 - SYS_FTELL64_UNLOCKED = 0xCF8 - SYS_FTELLO64 = 0xCF3 - SYS_FTELLO64_UNLOCKED = 0xCF9 - SYS_FTELLO_UNLOCKED = 0xCB5 - SYS_FTELL_UNLOCKED = 0xCB4 - SYS_FUPDATE = 0x0B5 - SYS_FUPDATE_UNLOCKED = 0xCB7 - SYS_FWIDE_UNLOCKED = 0xCB8 - SYS_FWPRINTF_UNLOCKED = 0xCB9 - SYS_FWRITE_UNLOCKED = 0xC7A - SYS_FWSCANF_UNLOCKED = 0xCBA - SYS_GETDATE64 = 0xD4F - SYS_GETIFADDRS = 0xCE7 - SYS_GETIPV4SOURCEFILTER = 0xC77 - SYS_GETSOURCEFILTER = 0xC79 - SYS_GETSYNTX = 0x0FD - SYS_GETS_UNLOCKED = 0xC7D - SYS_GETTIMEOFDAY64 = 0xD50 - SYS_GETWCHAR_UNLOCKED = 0xCBC - SYS_GETWC_UNLOCKED = 0xCBB - SYS_GMTIME = 0x0B0 - SYS_GMTIME64 = 0xCDC - SYS_GMTIME64_R = 0xCDD - SYS_HYPOTD128 = 0xBD1 - SYS_HYPOTD32 = 0xBCF - SYS_HYPOTD64 = 0xBD0 - SYS_ILOGBD128 = 0xBD4 - SYS_ILOGBD32 = 0xBD2 - SYS_ILOGBD64 = 0xBD3 - SYS_ILOGBF = 0xA7E - SYS_ILOGBL = 0xA81 - SYS_INET6_IS_SRCADDR = 0xD5A - SYS_ISBLANK = 0x0FE - SYS_ISWALNUM = 0x0FF - SYS_LDEXPD128 = 0xBD7 - SYS_LDEXPD32 = 0xBD5 - SYS_LDEXPD64 = 0xBD6 - SYS_LGAMMAD128 = 0xBDA - SYS_LGAMMAD32 = 0xBD8 - SYS_LGAMMAD64 = 0xBD9 - SYS_LIO_LISTIO = 0xC6A - SYS_LLRINT = 0xA84 - SYS_LLRINTD128 = 0xBDD - SYS_LLRINTD32 = 0xBDB - SYS_LLRINTD64 = 0xBDC - SYS_LLRINTF = 0xA87 - SYS_LLRINTL = 0xA8A - SYS_LLROUND = 0xA8D - SYS_LLROUNDD128 = 0xBE0 - SYS_LLROUNDD32 = 0xBDE - SYS_LLROUNDD64 = 0xBDF - SYS_LLROUNDF = 0xA90 - SYS_LLROUNDL = 0xA93 - SYS_LOCALTIM = 0x0B1 - SYS_LOCALTIME = 0x0B1 - SYS_LOCALTIME64 = 0xCDE - SYS_LOCALTIME64_R = 0xCDF - SYS_LOG10D128 = 0xBE6 - SYS_LOG10D32 = 0xBE4 - SYS_LOG10D64 = 0xBE5 - SYS_LOG1PD128 = 0xBE9 - SYS_LOG1PD32 = 0xBE7 - SYS_LOG1PD64 = 0xBE8 - SYS_LOG2D128 = 0xBEC - SYS_LOG2D32 = 0xBEA - SYS_LOG2D64 = 0xBEB - SYS_LOGBD128 = 0xBEF - SYS_LOGBD32 = 0xBED - SYS_LOGBD64 = 0xBEE - SYS_LOGBF = 0xA96 - SYS_LOGBL = 0xA99 - SYS_LOGD128 = 0xBE3 - SYS_LOGD32 = 0xBE1 - SYS_LOGD64 = 0xBE2 - SYS_LRINT = 0xA9C - SYS_LRINTD128 = 0xBF2 - SYS_LRINTD32 = 0xBF0 - SYS_LRINTD64 = 0xBF1 - SYS_LRINTF = 0xA9F - SYS_LRINTL = 0xAA2 - SYS_LROUNDD128 = 0xBF5 - SYS_LROUNDD32 = 0xBF3 - SYS_LROUNDD64 = 0xBF4 - SYS_LROUNDL = 0xAA5 - SYS_MBLEN = 0x0AF - SYS_MBRTOC16 = 0xD42 - SYS_MBRTOC32 = 0xD43 - SYS_MEMSET = 0x0A3 - SYS_MKTIME = 0x0AC - SYS_MKTIME64 = 0xCE0 - SYS_MODFD128 = 0xBF8 - SYS_MODFD32 = 0xBF6 - SYS_MODFD64 = 0xBF7 - SYS_NAN = 0xAA8 - SYS_NAND128 = 0xBFB - SYS_NAND32 = 0xBF9 - SYS_NAND64 = 0xBFA - SYS_NANF = 0xAAA - SYS_NANL = 0xAAC - SYS_NEARBYINT = 0xAAE - SYS_NEARBYINTD128 = 0xBFE - SYS_NEARBYINTD32 = 0xBFC - SYS_NEARBYINTD64 = 0xBFD - SYS_NEARBYINTF = 0xAB1 - SYS_NEARBYINTL = 0xAB4 - SYS_NEXTAFTERD128 = 0xC01 - SYS_NEXTAFTERD32 = 0xBFF - 
SYS_NEXTAFTERD64 = 0xC00 - SYS_NEXTAFTERF = 0xAB7 - SYS_NEXTAFTERL = 0xABA - SYS_NEXTTOWARD = 0xABD - SYS_NEXTTOWARDD128 = 0xC04 - SYS_NEXTTOWARDD32 = 0xC02 - SYS_NEXTTOWARDD64 = 0xC03 - SYS_NEXTTOWARDF = 0xAC0 - SYS_NEXTTOWARDL = 0xAC3 - SYS_NL_LANGINFO = 0x0FC - SYS_PERROR_UNLOCKED = 0xCBD - SYS_POSIX_FALLOCATE = 0xCE8 - SYS_POSIX_MEMALIGN = 0xCE9 - SYS_POSIX_OPENPT = 0xC66 - SYS_POWD128 = 0xC07 - SYS_POWD32 = 0xC05 - SYS_POWD64 = 0xC06 - SYS_PRINTF_UNLOCKED = 0xCBE - SYS_PSELECT = 0xC67 - SYS_PTHREAD_ATTR_GETSTACK = 0xB3E - SYS_PTHREAD_ATTR_SETSTACK = 0xB3F - SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 - SYS_PUTS_UNLOCKED = 0xC7F - SYS_PUTWCHAR_UNLOCKED = 0xCC0 - SYS_PUTWC_UNLOCKED = 0xCBF - SYS_QUANTEXPD128 = 0xD46 - SYS_QUANTEXPD32 = 0xD44 - SYS_QUANTEXPD64 = 0xD45 - SYS_QUANTIZED128 = 0xC0A - SYS_QUANTIZED32 = 0xC08 - SYS_QUANTIZED64 = 0xC09 - SYS_REMAINDERD128 = 0xC0D - SYS_REMAINDERD32 = 0xC0B - SYS_REMAINDERD64 = 0xC0C - SYS_RESIZE_ALLOC = 0xCEB - SYS_REWIND_UNLOCKED = 0xCC1 - SYS_RINTD128 = 0xC13 - SYS_RINTD32 = 0xC11 - SYS_RINTD64 = 0xC12 - SYS_RINTF = 0xACB - SYS_RINTL = 0xACD - SYS_ROUND = 0xACF - SYS_ROUNDD128 = 0xC16 - SYS_ROUNDD32 = 0xC14 - SYS_ROUNDD64 = 0xC15 - SYS_ROUNDF = 0xAD2 - SYS_ROUNDL = 0xAD5 - SYS_SAMEQUANTUMD128 = 0xC19 - SYS_SAMEQUANTUMD32 = 0xC17 - SYS_SAMEQUANTUMD64 = 0xC18 - SYS_SCALBLN = 0xAD8 - SYS_SCALBLND128 = 0xC1C - SYS_SCALBLND32 = 0xC1A - SYS_SCALBLND64 = 0xC1B - SYS_SCALBLNF = 0xADB - SYS_SCALBLNL = 0xADE - SYS_SCALBND128 = 0xC1F - SYS_SCALBND32 = 0xC1D - SYS_SCALBND64 = 0xC1E - SYS_SCALBNF = 0xAE3 - SYS_SCALBNL = 0xAE6 - SYS_SCANF_UNLOCKED = 0xCC2 - SYS_SCHED_YIELD = 0xB32 - SYS_SETENV = 0x0C8 - SYS_SETIPV4SOURCEFILTER = 0xC76 - SYS_SETSOURCEFILTER = 0xC78 - SYS_SHM_OPEN = 0xC8C - SYS_SHM_UNLINK = 0xC8D - SYS_SIND128 = 0xC22 - SYS_SIND32 = 0xC20 - SYS_SIND64 = 0xC21 - SYS_SINHD128 = 0xC25 - SYS_SINHD32 = 0xC23 - SYS_SINHD64 = 0xC24 - SYS_SIZEOF_ALLOC = 0xCEA - SYS_SOCKATMARK = 0xC68 - SYS_SQRTD128 = 0xC28 - SYS_SQRTD32 = 0xC26 - SYS_SQRTD64 = 0xC27 - SYS_STRCHR = 0x0A0 - SYS_STRCSPN = 0x0A1 - SYS_STRERROR = 0x0A8 - SYS_STRERROR_R = 0xB33 - SYS_STRFTIME = 0x0B2 - SYS_STRLEN = 0x0A9 - SYS_STRPBRK = 0x0A2 - SYS_STRSPN = 0x0A4 - SYS_STRSTR = 0x0A5 - SYS_STRTOD128 = 0xC2B - SYS_STRTOD32 = 0xC29 - SYS_STRTOD64 = 0xC2A - SYS_STRTOK = 0x0A6 - SYS_TAND128 = 0xC2E - SYS_TAND32 = 0xC2C - SYS_TAND64 = 0xC2D - SYS_TANHD128 = 0xC31 - SYS_TANHD32 = 0xC2F - SYS_TANHD64 = 0xC30 - SYS_TGAMMAD128 = 0xC34 - SYS_TGAMMAD32 = 0xC32 - SYS_TGAMMAD64 = 0xC33 - SYS_TIME = 0x0AD - SYS_TIME64 = 0xCE1 - SYS_TMPFILE64 = 0xD4D - SYS_TMPFILE64_UNLOCKED = 0xD4E - SYS_TMPFILE_UNLOCKED = 0xCFD - SYS_TRUNCD128 = 0xC40 - SYS_TRUNCD32 = 0xC3E - SYS_TRUNCD64 = 0xC3F - SYS_UNGETC_UNLOCKED = 0xCC3 - SYS_UNGETWC_UNLOCKED = 0xCC4 - SYS_UNSETENV = 0xB34 - SYS_VFPRINTF_UNLOCKED = 0xCC5 - SYS_VFSCANF_UNLOCKED = 0xCC7 - SYS_VFWPRINTF_UNLOCKED = 0xCC9 - SYS_VFWSCANF_UNLOCKED = 0xCCB - SYS_VPRINTF_UNLOCKED = 0xCCD - SYS_VSCANF_UNLOCKED = 0xCCF - SYS_VWPRINTF_UNLOCKED = 0xCD1 - SYS_VWSCANF_UNLOCKED = 0xCD3 - SYS_WCSTOD128 = 0xC43 - SYS_WCSTOD32 = 0xC41 - SYS_WCSTOD64 = 0xC42 - SYS_WPRINTF_UNLOCKED = 0xCD5 - SYS_WSCANF_UNLOCKED = 0xCD6 - SYS__FLUSHLBF = 0xD68 - SYS__FLUSHLBF_UNLOCKED = 0xD6F - SYS___ACOSHF_H = 0xA54 - SYS___ACOSHL_H = 0xA55 - SYS___ASINHF_H = 0xA56 - SYS___ASINHL_H = 0xA57 - SYS___ATANPID128 = 0xC6D - SYS___ATANPID32 = 0xC6B - SYS___ATANPID64 = 0xC6C - SYS___CBRTF_H = 0xA58 - SYS___CBRTL_H = 0xA59 - SYS___CDUMP = 0x0C4 - SYS___CLASS = 0xAFA - SYS___CLASS2 = 0xB99 - SYS___CLASS2D128 = 
0xC99 - SYS___CLASS2D32 = 0xC97 - SYS___CLASS2D64 = 0xC98 - SYS___CLASS2F = 0xC91 - SYS___CLASS2F_B = 0xC93 - SYS___CLASS2F_H = 0xC94 - SYS___CLASS2L = 0xC92 - SYS___CLASS2L_B = 0xC95 - SYS___CLASS2L_H = 0xC96 - SYS___CLASS2_B = 0xB9A - SYS___CLASS2_H = 0xB9B - SYS___CLASS_B = 0xAFB - SYS___CLASS_H = 0xAFC - SYS___CLOGL_B = 0xA01 - SYS___CLOGL_H = 0xA02 - SYS___CLRENV = 0x0C9 - SYS___CLRMF = 0x0BD - SYS___CODEPAGE_INFO = 0xC64 - SYS___CONJF_B = 0xA07 - SYS___CONJF_H = 0xA08 - SYS___CONJL_B = 0xA0A - SYS___CONJL_H = 0xA0B - SYS___CONJ_B = 0xA04 - SYS___CONJ_H = 0xA05 - SYS___COPYSIGN_B = 0xA5A - SYS___COPYSIGN_H = 0xAF5 - SYS___COSPID128 = 0xC70 - SYS___COSPID32 = 0xC6E - SYS___COSPID64 = 0xC6F - SYS___CPOWF_B = 0xA10 - SYS___CPOWF_H = 0xA11 - SYS___CPOWL_B = 0xA13 - SYS___CPOWL_H = 0xA14 - SYS___CPOW_B = 0xA0D - SYS___CPOW_H = 0xA0E - SYS___CPROJF_B = 0xA19 - SYS___CPROJF_H = 0xA1A - SYS___CPROJL_B = 0xA1C - SYS___CPROJL_H = 0xA1D - SYS___CPROJ_B = 0xA16 - SYS___CPROJ_H = 0xA17 - SYS___CREALF_B = 0xA22 - SYS___CREALF_H = 0xA23 - SYS___CREALL_B = 0xA25 - SYS___CREALL_H = 0xA26 - SYS___CREAL_B = 0xA1F - SYS___CREAL_H = 0xA20 - SYS___CSINF_B = 0xA2B - SYS___CSINF_H = 0xA2C - SYS___CSINHF_B = 0xA34 - SYS___CSINHF_H = 0xA35 - SYS___CSINHL_B = 0xA37 - SYS___CSINHL_H = 0xA38 - SYS___CSINH_B = 0xA31 - SYS___CSINH_H = 0xA32 - SYS___CSINL_B = 0xA2E - SYS___CSINL_H = 0xA2F - SYS___CSIN_B = 0xA28 - SYS___CSIN_H = 0xA29 - SYS___CSNAP = 0x0C5 - SYS___CSQRTF_B = 0xA3D - SYS___CSQRTF_H = 0xA3E - SYS___CSQRTL_B = 0xA40 - SYS___CSQRTL_H = 0xA41 - SYS___CSQRT_B = 0xA3A - SYS___CSQRT_H = 0xA3B - SYS___CTANF_B = 0xA46 - SYS___CTANF_H = 0xA47 - SYS___CTANHF_B = 0xA4F - SYS___CTANHF_H = 0xA50 - SYS___CTANHL_B = 0xA52 - SYS___CTANHL_H = 0xA53 - SYS___CTANH_B = 0xA4C - SYS___CTANH_H = 0xA4D - SYS___CTANL_B = 0xA49 - SYS___CTANL_H = 0xA4A - SYS___CTAN_B = 0xA43 - SYS___CTAN_H = 0xA44 - SYS___CTEST = 0x0C7 - SYS___CTRACE = 0x0C6 - SYS___D1TOP = 0xC9B - SYS___D2TOP = 0xC9C - SYS___D4TOP = 0xC9D - SYS___DYNALL = 0x0C3 - SYS___DYNFRE = 0x0C2 - SYS___EXP2F_H = 0xA5E - SYS___EXP2L_H = 0xA5F - SYS___EXP2_H = 0xA5D - SYS___EXPM1F_H = 0xA5B - SYS___EXPM1L_H = 0xA5C - SYS___FBUFSIZE = 0xD60 - SYS___FLBF = 0xD62 - SYS___FLDATA = 0x0C1 - SYS___FMAF_B = 0xA67 - SYS___FMAF_H = 0xA68 - SYS___FMAL_B = 0xA6A - SYS___FMAL_H = 0xA6B - SYS___FMAXF_B = 0xA70 - SYS___FMAXF_H = 0xA71 - SYS___FMAXL_B = 0xA73 - SYS___FMAXL_H = 0xA74 - SYS___FMAX_B = 0xA6D - SYS___FMAX_H = 0xA6E - SYS___FMA_B = 0xA64 - SYS___FMA_H = 0xA65 - SYS___FMINF_B = 0xA79 - SYS___FMINF_H = 0xA7A - SYS___FMINL_B = 0xA7C - SYS___FMINL_H = 0xA7D - SYS___FMIN_B = 0xA76 - SYS___FMIN_H = 0xA77 - SYS___FPENDING = 0xD61 - SYS___FPENDING_UNLOCKED = 0xD6C - SYS___FPURGE = 0xD69 - SYS___FPURGE_UNLOCKED = 0xD70 - SYS___FP_CAST_D = 0xBCB - SYS___FREADABLE = 0xD63 - SYS___FREADAHEAD = 0xD6A - SYS___FREADAHEAD_UNLOCKED = 0xD71 - SYS___FREADING = 0xD65 - SYS___FREADING_UNLOCKED = 0xD6D - SYS___FSEEK2 = 0xB3C - SYS___FSETERR = 0xD6B - SYS___FSETLOCKING = 0xD67 - SYS___FTCHEP = 0x0BF - SYS___FTELL2 = 0xB3B - SYS___FUPDT = 0x0B5 - SYS___FWRITABLE = 0xD64 - SYS___FWRITING = 0xD66 - SYS___FWRITING_UNLOCKED = 0xD6E - SYS___GETCB = 0x0B4 - SYS___GETGRGID1 = 0xD5B - SYS___GETGRNAM1 = 0xD5C - SYS___GETTHENT = 0xCE5 - SYS___GETTOD = 0xD3E - SYS___HYPOTF_H = 0xAF6 - SYS___HYPOTL_H = 0xAF7 - SYS___ILOGBF_B = 0xA7F - SYS___ILOGBF_H = 0xA80 - SYS___ILOGBL_B = 0xA82 - SYS___ILOGBL_H = 0xA83 - SYS___ISBLANK_A = 0xB2E - SYS___ISBLNK = 0x0FE - SYS___ISWBLANK_A = 0xB2F - SYS___LE_CEEGTJS = 0xD72 - 
SYS___LE_TRACEBACK = 0xB7A - SYS___LGAMMAL_H = 0xA62 - SYS___LGAMMA_B_C99 = 0xB39 - SYS___LGAMMA_H_C99 = 0xB38 - SYS___LGAMMA_R_C99 = 0xB3A - SYS___LLRINTF_B = 0xA88 - SYS___LLRINTF_H = 0xA89 - SYS___LLRINTL_B = 0xA8B - SYS___LLRINTL_H = 0xA8C - SYS___LLRINT_B = 0xA85 - SYS___LLRINT_H = 0xA86 - SYS___LLROUNDF_B = 0xA91 - SYS___LLROUNDF_H = 0xA92 - SYS___LLROUNDL_B = 0xA94 - SYS___LLROUNDL_H = 0xA95 - SYS___LLROUND_B = 0xA8E - SYS___LLROUND_H = 0xA8F - SYS___LOCALE_CTL = 0xD47 - SYS___LOG1PF_H = 0xA60 - SYS___LOG1PL_H = 0xA61 - SYS___LOGBF_B = 0xA97 - SYS___LOGBF_H = 0xA98 - SYS___LOGBL_B = 0xA9A - SYS___LOGBL_H = 0xA9B - SYS___LOGIN_APPLID = 0xCE2 - SYS___LRINTF_B = 0xAA0 - SYS___LRINTF_H = 0xAA1 - SYS___LRINTL_B = 0xAA3 - SYS___LRINTL_H = 0xAA4 - SYS___LRINT_B = 0xA9D - SYS___LRINT_H = 0xA9E - SYS___LROUNDF_FIXUP = 0xB31 - SYS___LROUNDL_B = 0xAA6 - SYS___LROUNDL_H = 0xAA7 - SYS___LROUND_FIXUP = 0xB30 - SYS___MOSERVICES = 0xD3D - SYS___MUST_STAY_CLEAN = 0xB7C - SYS___NANF_B = 0xAAB - SYS___NANL_B = 0xAAD - SYS___NAN_B = 0xAA9 - SYS___NEARBYINTF_B = 0xAB2 - SYS___NEARBYINTF_H = 0xAB3 - SYS___NEARBYINTL_B = 0xAB5 - SYS___NEARBYINTL_H = 0xAB6 - SYS___NEARBYINT_B = 0xAAF - SYS___NEARBYINT_H = 0xAB0 - SYS___NEXTAFTERF_B = 0xAB8 - SYS___NEXTAFTERF_H = 0xAB9 - SYS___NEXTAFTERL_B = 0xABB - SYS___NEXTAFTERL_H = 0xABC - SYS___NEXTTOWARDF_B = 0xAC1 - SYS___NEXTTOWARDF_H = 0xAC2 - SYS___NEXTTOWARDL_B = 0xAC4 - SYS___NEXTTOWARDL_H = 0xAC5 - SYS___NEXTTOWARD_B = 0xABE - SYS___NEXTTOWARD_H = 0xABF - SYS___O_ENV = 0xB7D - SYS___PASSWD_APPLID = 0xCE3 - SYS___PTOD1 = 0xC9E - SYS___PTOD2 = 0xC9F - SYS___PTOD4 = 0xCA0 - SYS___REGCOMP_STD = 0x0EA - SYS___REMAINDERF_H = 0xAC6 - SYS___REMAINDERL_H = 0xAC7 - SYS___REMQUOD128 = 0xC10 - SYS___REMQUOD32 = 0xC0E - SYS___REMQUOD64 = 0xC0F - SYS___REMQUOF_H = 0xAC9 - SYS___REMQUOL_H = 0xACA - SYS___REMQUO_H = 0xAC8 - SYS___RINTF_B = 0xACC - SYS___RINTL_B = 0xACE - SYS___ROUNDF_B = 0xAD3 - SYS___ROUNDF_H = 0xAD4 - SYS___ROUNDL_B = 0xAD6 - SYS___ROUNDL_H = 0xAD7 - SYS___ROUND_B = 0xAD0 - SYS___ROUND_H = 0xAD1 - SYS___SCALBLNF_B = 0xADC - SYS___SCALBLNF_H = 0xADD - SYS___SCALBLNL_B = 0xADF - SYS___SCALBLNL_H = 0xAE0 - SYS___SCALBLN_B = 0xAD9 - SYS___SCALBLN_H = 0xADA - SYS___SCALBNF_B = 0xAE4 - SYS___SCALBNF_H = 0xAE5 - SYS___SCALBNL_B = 0xAE7 - SYS___SCALBNL_H = 0xAE8 - SYS___SCALBN_B = 0xAE1 - SYS___SCALBN_H = 0xAE2 - SYS___SETENV = 0x0C8 - SYS___SINPID128 = 0xC73 - SYS___SINPID32 = 0xC71 - SYS___SINPID64 = 0xC72 - SYS___SMF_RECORD2 = 0xD48 - SYS___STATIC_REINIT = 0xB3D - SYS___TGAMMAF_H_C99 = 0xB79 - SYS___TGAMMAL_H = 0xAE9 - SYS___TGAMMA_H_C99 = 0xB78 - SYS___TOCSNAME2 = 0xC9A - SYS_CEIL = 0x01F - SYS_CHAUDIT = 0x1E0 - SYS_EXP = 0x01A - SYS_FCHAUDIT = 0x1E1 - SYS_FREXP = 0x01D - SYS_GETGROUPSBYNAME = 0x1E2 - SYS_GETPWUID = 0x1A0 - SYS_GETUID = 0x1A1 - SYS_ISATTY = 0x1A3 - SYS_KILL = 0x1A4 - SYS_LDEXP = 0x01E - SYS_LINK = 0x1A5 - SYS_LOG10 = 0x01C - SYS_LSEEK = 0x1A6 - SYS_LSTAT = 0x1A7 - SYS_MKDIR = 0x1A8 - SYS_MKFIFO = 0x1A9 - SYS_MKNOD = 0x1AA - SYS_MODF = 0x01B - SYS_MOUNT = 0x1AB - SYS_OPEN = 0x1AC - SYS_OPENDIR = 0x1AD - SYS_PATHCONF = 0x1AE - SYS_PAUSE = 0x1AF - SYS_PIPE = 0x1B0 - SYS_PTHREAD_ATTR_DESTROY = 0x1E7 - SYS_PTHREAD_ATTR_GETDETACHSTATE = 0x1EB - SYS_PTHREAD_ATTR_GETSTACKSIZE = 0x1E9 - SYS_PTHREAD_ATTR_GETWEIGHT_NP = 0x1ED - SYS_PTHREAD_ATTR_INIT = 0x1E6 - SYS_PTHREAD_ATTR_SETDETACHSTATE = 0x1EA - SYS_PTHREAD_ATTR_SETSTACKSIZE = 0x1E8 - SYS_PTHREAD_ATTR_SETWEIGHT_NP = 0x1EC - SYS_PTHREAD_CANCEL = 0x1EE - SYS_PTHREAD_CLEANUP_POP = 0x1F0 - 
SYS_PTHREAD_CLEANUP_PUSH = 0x1EF - SYS_PTHREAD_CONDATTR_DESTROY = 0x1F2 - SYS_PTHREAD_CONDATTR_INIT = 0x1F1 - SYS_PTHREAD_COND_BROADCAST = 0x1F6 - SYS_PTHREAD_COND_DESTROY = 0x1F4 - SYS_PTHREAD_COND_INIT = 0x1F3 - SYS_PTHREAD_COND_SIGNAL = 0x1F5 - SYS_PTHREAD_COND_TIMEDWAIT = 0x1F8 - SYS_PTHREAD_COND_WAIT = 0x1F7 - SYS_PTHREAD_CREATE = 0x1F9 - SYS_PTHREAD_DETACH = 0x1FA - SYS_PTHREAD_EQUAL = 0x1FB - SYS_PTHREAD_EXIT = 0x1E4 - SYS_PTHREAD_GETSPECIFIC = 0x1FC - SYS_PTHREAD_JOIN = 0x1FD - SYS_PTHREAD_KEY_CREATE = 0x1FE - SYS_PTHREAD_KILL = 0x1E5 - SYS_PTHREAD_MUTEXATTR_INIT = 0x1FF - SYS_READ = 0x1B2 - SYS_READDIR = 0x1B3 - SYS_READLINK = 0x1B4 - SYS_REWINDDIR = 0x1B5 - SYS_RMDIR = 0x1B6 - SYS_SETEGID = 0x1B7 - SYS_SETEUID = 0x1B8 - SYS_SETGID = 0x1B9 - SYS_SETPGID = 0x1BA - SYS_SETSID = 0x1BB - SYS_SETUID = 0x1BC - SYS_SIGACTION = 0x1BD - SYS_SIGADDSET = 0x1BE - SYS_SIGDELSET = 0x1BF - SYS_SIGEMPTYSET = 0x1C0 - SYS_SIGFILLSET = 0x1C1 - SYS_SIGISMEMBER = 0x1C2 - SYS_SIGLONGJMP = 0x1C3 - SYS_SIGPENDING = 0x1C4 - SYS_SIGPROCMASK = 0x1C5 - SYS_SIGSETJMP = 0x1C6 - SYS_SIGSUSPEND = 0x1C7 - SYS_SIGWAIT = 0x1E3 - SYS_SLEEP = 0x1C8 - SYS_STAT = 0x1C9 - SYS_SYMLINK = 0x1CB - SYS_SYSCONF = 0x1CC - SYS_TCDRAIN = 0x1CD - SYS_TCFLOW = 0x1CE - SYS_TCFLUSH = 0x1CF - SYS_TCGETATTR = 0x1D0 - SYS_TCGETPGRP = 0x1D1 - SYS_TCSENDBREAK = 0x1D2 - SYS_TCSETATTR = 0x1D3 - SYS_TCSETPGRP = 0x1D4 - SYS_TIMES = 0x1D5 - SYS_TTYNAME = 0x1D6 - SYS_TZSET = 0x1D7 - SYS_UMASK = 0x1D8 - SYS_UMOUNT = 0x1D9 - SYS_UNAME = 0x1DA - SYS_UNLINK = 0x1DB - SYS_UTIME = 0x1DC - SYS_WAIT = 0x1DD - SYS_WAITPID = 0x1DE - SYS_WRITE = 0x1DF - SYS_W_GETPSENT = 0x1B1 - SYS_W_IOCTL = 0x1A2 - SYS_W_STATFS = 0x1CA - SYS_A64L = 0x2EF - SYS_BCMP = 0x2B9 - SYS_BCOPY = 0x2BA - SYS_BZERO = 0x2BB - SYS_CATCLOSE = 0x2B6 - SYS_CATGETS = 0x2B7 - SYS_CATOPEN = 0x2B8 - SYS_CRYPT = 0x2AC - SYS_DBM_CLEARERR = 0x2F7 - SYS_DBM_CLOSE = 0x2F8 - SYS_DBM_DELETE = 0x2F9 - SYS_DBM_ERROR = 0x2FA - SYS_DBM_FETCH = 0x2FB - SYS_DBM_FIRSTKEY = 0x2FC - SYS_DBM_NEXTKEY = 0x2FD - SYS_DBM_OPEN = 0x2FE - SYS_DBM_STORE = 0x2FF - SYS_DRAND48 = 0x2B2 - SYS_ENCRYPT = 0x2AD - SYS_ENDUTXENT = 0x2E1 - SYS_ERAND48 = 0x2B3 - SYS_ERF = 0x02C - SYS_ERFC = 0x02D - SYS_FCHDIR = 0x2D9 - SYS_FFS = 0x2BC - SYS_FMTMSG = 0x2E5 - SYS_FSTATVFS = 0x2B4 - SYS_FTIME = 0x2F5 - SYS_GAMMA = 0x02E - SYS_GETDATE = 0x2A6 - SYS_GETPAGESIZE = 0x2D8 - SYS_GETTIMEOFDAY = 0x2F6 - SYS_GETUTXENT = 0x2E0 - SYS_GETUTXID = 0x2E2 - SYS_GETUTXLINE = 0x2E3 - SYS_HCREATE = 0x2C6 - SYS_HDESTROY = 0x2C7 - SYS_HSEARCH = 0x2C8 - SYS_HYPOT = 0x02B - SYS_INDEX = 0x2BD - SYS_INITSTATE = 0x2C2 - SYS_INSQUE = 0x2CF - SYS_ISASCII = 0x2ED - SYS_JRAND48 = 0x2E6 - SYS_L64A = 0x2F0 - SYS_LCONG48 = 0x2EA - SYS_LFIND = 0x2C9 - SYS_LRAND48 = 0x2E7 - SYS_LSEARCH = 0x2CA - SYS_MEMCCPY = 0x2D4 - SYS_MRAND48 = 0x2E8 - SYS_NRAND48 = 0x2E9 - SYS_PCLOSE = 0x2D2 - SYS_POPEN = 0x2D1 - SYS_PUTUTXLINE = 0x2E4 - SYS_RANDOM = 0x2C4 - SYS_REMQUE = 0x2D0 - SYS_RINDEX = 0x2BE - SYS_SEED48 = 0x2EC - SYS_SETKEY = 0x2AE - SYS_SETSTATE = 0x2C3 - SYS_SETUTXENT = 0x2DF - SYS_SRAND48 = 0x2EB - SYS_SRANDOM = 0x2C5 - SYS_STATVFS = 0x2B5 - SYS_STRCASECMP = 0x2BF - SYS_STRDUP = 0x2C0 - SYS_STRNCASECMP = 0x2C1 - SYS_SWAB = 0x2D3 - SYS_TDELETE = 0x2CB - SYS_TFIND = 0x2CC - SYS_TOASCII = 0x2EE - SYS_TSEARCH = 0x2CD - SYS_TWALK = 0x2CE - SYS_UALARM = 0x2F1 - SYS_USLEEP = 0x2F2 - SYS_WAIT3 = 0x2A7 - SYS_WAITID = 0x2A8 - SYS_Y1 = 0x02A - SYS___ATOE = 0x2DB - SYS___ATOE_L = 0x2DC - SYS___CATTRM = 0x2A9 - SYS___CNVBLK = 0x2AF - SYS___CRYTRM = 0x2B0 - SYS___DLGHT = 0x2A1 - 
SYS___ECRTRM = 0x2B1 - SYS___ETOA = 0x2DD - SYS___ETOA_L = 0x2DE - SYS___GDTRM = 0x2AA - SYS___OCLCK = 0x2DA - SYS___OPARGF = 0x2A2 - SYS___OPERRF = 0x2A5 - SYS___OPINDF = 0x2A4 - SYS___OPOPTF = 0x2A3 - SYS___RNDTRM = 0x2AB - SYS___SRCTRM = 0x2F4 - SYS___TZONE = 0x2A0 - SYS___UTXTRM = 0x2F3 - SYS_ASIN = 0x03E - SYS_ISXDIGIT = 0x03B - SYS_SETLOCAL = 0x03A - SYS_SETLOCALE = 0x03A - SYS_SIN = 0x03F - SYS_TOLOWER = 0x03C - SYS_TOUPPER = 0x03D - SYS_ACCEPT_AND_RECV = 0x4F7 - SYS_ATOL = 0x04E - SYS_CHECKSCH = 0x4BC - SYS_CHECKSCHENV = 0x4BC - SYS_CLEARERR = 0x04C - SYS_CONNECTS = 0x4B5 - SYS_CONNECTSERVER = 0x4B5 - SYS_CONNECTW = 0x4B4 - SYS_CONNECTWORKMGR = 0x4B4 - SYS_CONTINUE = 0x4B3 - SYS_CONTINUEWORKUNIT = 0x4B3 - SYS_COPYSIGN = 0x4C2 - SYS_CREATEWO = 0x4B2 - SYS_CREATEWORKUNIT = 0x4B2 - SYS_DELETEWO = 0x4B9 - SYS_DELETEWORKUNIT = 0x4B9 - SYS_DISCONNE = 0x4B6 - SYS_DISCONNECTSERVER = 0x4B6 - SYS_FEOF = 0x04D - SYS_FERROR = 0x04A - SYS_FINITE = 0x4C8 - SYS_GAMMA_R = 0x4E2 - SYS_JOINWORK = 0x4B7 - SYS_JOINWORKUNIT = 0x4B7 - SYS_LEAVEWOR = 0x4B8 - SYS_LEAVEWORKUNIT = 0x4B8 - SYS_LGAMMA_R = 0x4EB - SYS_MATHERR = 0x4D0 - SYS_PERROR = 0x04F - SYS_QUERYMET = 0x4BA - SYS_QUERYMETRICS = 0x4BA - SYS_QUERYSCH = 0x4BB - SYS_QUERYSCHENV = 0x4BB - SYS_REWIND = 0x04B - SYS_SCALBN = 0x4D4 - SYS_SIGNIFIC = 0x4D5 - SYS_SIGNIFICAND = 0x4D5 - SYS___ACOSH_B = 0x4DA - SYS___ACOS_B = 0x4D9 - SYS___ASINH_B = 0x4BE - SYS___ASIN_B = 0x4DB - SYS___ATAN2_B = 0x4DC - SYS___ATANH_B = 0x4DD - SYS___ATAN_B = 0x4BF - SYS___CBRT_B = 0x4C0 - SYS___CEIL_B = 0x4C1 - SYS___COSH_B = 0x4DE - SYS___COS_B = 0x4C3 - SYS___DGHT = 0x4A8 - SYS___ENVN = 0x4B0 - SYS___ERFC_B = 0x4C5 - SYS___ERF_B = 0x4C4 - SYS___EXPM1_B = 0x4C6 - SYS___EXP_B = 0x4DF - SYS___FABS_B = 0x4C7 - SYS___FLOOR_B = 0x4C9 - SYS___FMOD_B = 0x4E0 - SYS___FP_SETMODE = 0x4F8 - SYS___FREXP_B = 0x4CA - SYS___GAMMA_B = 0x4E1 - SYS___GDRR = 0x4A1 - SYS___HRRNO = 0x4A2 - SYS___HYPOT_B = 0x4E3 - SYS___ILOGB_B = 0x4CB - SYS___ISNAN_B = 0x4CC - SYS___J0_B = 0x4E4 - SYS___J1_B = 0x4E6 - SYS___JN_B = 0x4E8 - SYS___LDEXP_B = 0x4CD - SYS___LGAMMA_B = 0x4EA - SYS___LOG10_B = 0x4ED - SYS___LOG1P_B = 0x4CE - SYS___LOGB_B = 0x4CF - SYS___LOGIN = 0x4F5 - SYS___LOG_B = 0x4EC - SYS___MLOCKALL = 0x4B1 - SYS___MODF_B = 0x4D1 - SYS___NEXTAFTER_B = 0x4D2 - SYS___OPENDIR2 = 0x4F3 - SYS___OPEN_STAT = 0x4F6 - SYS___OPND = 0x4A5 - SYS___OPPT = 0x4A6 - SYS___OPRG = 0x4A3 - SYS___OPRR = 0x4A4 - SYS___PID_AFFINITY = 0x4BD - SYS___POW_B = 0x4EE - SYS___READDIR2 = 0x4F4 - SYS___REMAINDER_B = 0x4EF - SYS___RINT_B = 0x4D3 - SYS___SCALB_B = 0x4F0 - SYS___SIGACTIONSET = 0x4FB - SYS___SIGGM = 0x4A7 - SYS___SINH_B = 0x4F1 - SYS___SIN_B = 0x4D6 - SYS___SQRT_B = 0x4F2 - SYS___TANH_B = 0x4D8 - SYS___TAN_B = 0x4D7 - SYS___TRRNO = 0x4AF - SYS___TZNE = 0x4A9 - SYS___TZZN = 0x4AA - SYS___UCREATE = 0x4FC - SYS___UFREE = 0x4FE - SYS___UHEAPREPORT = 0x4FF - SYS___UMALLOC = 0x4FD - SYS___Y0_B = 0x4E5 - SYS___Y1_B = 0x4E7 - SYS___YN_B = 0x4E9 - SYS_ABORT = 0x05C - SYS_ASCTIME_R = 0x5E0 - SYS_ATEXIT = 0x05D - SYS_CONNECTE = 0x5AE - SYS_CONNECTEXPORTIMPORT = 0x5AE - SYS_CTIME_R = 0x5E1 - SYS_DN_COMP = 0x5DF - SYS_DN_EXPAND = 0x5DD - SYS_DN_SKIPNAME = 0x5DE - SYS_EXIT = 0x05A - SYS_EXPORTWO = 0x5A1 - SYS_EXPORTWORKUNIT = 0x5A1 - SYS_EXTRACTW = 0x5A5 - SYS_EXTRACTWORKUNIT = 0x5A5 - SYS_FSEEKO = 0x5C9 - SYS_FTELLO = 0x5C8 - SYS_GETGRGID_R = 0x5E7 - SYS_GETGRNAM_R = 0x5E8 - SYS_GETLOGIN_R = 0x5E9 - SYS_GETPWNAM_R = 0x5EA - SYS_GETPWUID_R = 0x5EB - SYS_GMTIME_R = 0x5E2 - SYS_IMPORTWO = 0x5A3 - SYS_IMPORTWORKUNIT = 0x5A3 - 
SYS_INET_NTOP = 0x5D3 - SYS_INET_PTON = 0x5D4 - SYS_LLABS = 0x5CE - SYS_LLDIV = 0x5CB - SYS_LOCALTIME_R = 0x5E3 - SYS_PTHREAD_ATFORK = 0x5ED - SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB - SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE - SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 - SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF - SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC - SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 - SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA - SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 - SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 - SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 - SYS_PTHREAD_DETACH_U98 = 0x5FD - SYS_PTHREAD_GETCONCURRENCY = 0x5F4 - SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE - SYS_PTHREAD_KEY_DELETE = 0x5F5 - SYS_PTHREAD_SETCANCELSTATE = 0x5FF - SYS_PTHREAD_SETCONCURRENCY = 0x5F6 - SYS_PTHREAD_SIGMASK = 0x5F7 - SYS_QUERYENC = 0x5AD - SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD - SYS_RAISE = 0x05E - SYS_RAND_R = 0x5E4 - SYS_READDIR_R = 0x5E6 - SYS_REALLOC = 0x05B - SYS_RES_INIT = 0x5D8 - SYS_RES_MKQUERY = 0x5D7 - SYS_RES_QUERY = 0x5D9 - SYS_RES_QUERYDOMAIN = 0x5DC - SYS_RES_SEARCH = 0x5DA - SYS_RES_SEND = 0x5DB - SYS_SETJMP = 0x05F - SYS_SIGQUEUE = 0x5A9 - SYS_STRTOK_R = 0x5E5 - SYS_STRTOLL = 0x5B0 - SYS_STRTOULL = 0x5B1 - SYS_TTYNAME_R = 0x5EC - SYS_UNDOEXPO = 0x5A2 - SYS_UNDOEXPORTWORKUNIT = 0x5A2 - SYS_UNDOIMPO = 0x5A4 - SYS_UNDOIMPORTWORKUNIT = 0x5A4 - SYS_WCSTOLL = 0x5CC - SYS_WCSTOULL = 0x5CD - SYS___ABORT = 0x05C - SYS___CONSOLE2 = 0x5D2 - SYS___CPL = 0x5A6 - SYS___DISCARDDATA = 0x5F8 - SYS___DSA_PREV = 0x5B2 - SYS___EP_FIND = 0x5B3 - SYS___FP_SWAPMODE = 0x5AF - SYS___GETUSERID = 0x5AB - SYS___GET_CPUID = 0x5B9 - SYS___GET_SYSTEM_SETTINGS = 0x5BA - SYS___IPDOMAINNAME = 0x5AC - SYS___MAP_INIT = 0x5A7 - SYS___MAP_SERVICE = 0x5A8 - SYS___MOUNT = 0x5AA - SYS___MSGRCV_TIMED = 0x5B7 - SYS___RES = 0x5D6 - SYS___SEMOP_TIMED = 0x5B8 - SYS___SERVER_THREADS_QUERY = 0x5B4 - SYS_FPRINTF = 0x06D - SYS_FSCANF = 0x06A - SYS_PRINTF = 0x06F - SYS_SETBUF = 0x06B - SYS_SETVBUF = 0x06C - SYS_SSCANF = 0x06E - SYS___CATGETS_A = 0x6C0 - SYS___CHAUDIT_A = 0x6F4 - SYS___CHMOD_A = 0x6E8 - SYS___COLLATE_INIT_A = 0x6AC - SYS___CREAT_A = 0x6F6 - SYS___CTYPE_INIT_A = 0x6AF - SYS___DLLLOAD_A = 0x6DF - SYS___DLLQUERYFN_A = 0x6E0 - SYS___DLLQUERYVAR_A = 0x6E1 - SYS___E2A_L = 0x6E3 - SYS___EXECLE_A = 0x6A0 - SYS___EXECLP_A = 0x6A4 - SYS___EXECVE_A = 0x6C1 - SYS___EXECVP_A = 0x6C2 - SYS___EXECV_A = 0x6B1 - SYS___FPRINTF_A = 0x6FA - SYS___GETADDRINFO_A = 0x6BF - SYS___GETNAMEINFO_A = 0x6C4 - SYS___GET_WCTYPE_STD_A = 0x6AE - SYS___ICONV_OPEN_A = 0x6DE - SYS___IF_INDEXTONAME_A = 0x6DC - SYS___IF_NAMETOINDEX_A = 0x6DB - SYS___ISWCTYPE_A = 0x6B0 - SYS___IS_WCTYPE_STD_A = 0x6B2 - SYS___LOCALECONV_A = 0x6B8 - SYS___LOCALECONV_STD_A = 0x6B9 - SYS___LOCALE_INIT_A = 0x6B7 - SYS___LSTAT_A = 0x6EE - SYS___LSTAT_O_A = 0x6EF - SYS___MKDIR_A = 0x6E9 - SYS___MKFIFO_A = 0x6EC - SYS___MKNOD_A = 0x6F0 - SYS___MONETARY_INIT_A = 0x6BC - SYS___MOUNT_A = 0x6F1 - SYS___NL_CSINFO_A = 0x6D6 - SYS___NL_LANGINFO_A = 0x6BA - SYS___NL_LNAGINFO_STD_A = 0x6BB - SYS___NL_MONINFO_A = 0x6D7 - SYS___NL_NUMINFO_A = 0x6D8 - SYS___NL_RESPINFO_A = 0x6D9 - SYS___NL_TIMINFO_A = 0x6DA - SYS___NUMERIC_INIT_A = 0x6C6 - SYS___OPEN_A = 0x6F7 - SYS___PRINTF_A = 0x6DD - SYS___RESP_INIT_A = 0x6C7 - SYS___RPMATCH_A = 0x6C8 - SYS___RPMATCH_C_A = 0x6C9 - SYS___RPMATCH_STD_A = 0x6CA - SYS___SETLOCALE_A = 0x6F9 - SYS___SPAWNP_A = 0x6C5 - SYS___SPAWN_A = 0x6C3 - SYS___SPRINTF_A = 0x6FB - SYS___STAT_A = 0x6EA - SYS___STAT_O_A = 0x6EB - SYS___STRCOLL_STD_A = 0x6A1 - SYS___STRFMON_A = 0x6BD - 
SYS___STRFMON_STD_A = 0x6BE - SYS___STRFTIME_A = 0x6CC - SYS___STRFTIME_STD_A = 0x6CD - SYS___STRPTIME_A = 0x6CE - SYS___STRPTIME_STD_A = 0x6CF - SYS___STRXFRM_A = 0x6A2 - SYS___STRXFRM_C_A = 0x6A3 - SYS___STRXFRM_STD_A = 0x6A5 - SYS___SYNTAX_INIT_A = 0x6D4 - SYS___TIME_INIT_A = 0x6CB - SYS___TOD_INIT_A = 0x6D5 - SYS___TOWLOWER_A = 0x6B3 - SYS___TOWLOWER_STD_A = 0x6B4 - SYS___TOWUPPER_A = 0x6B5 - SYS___TOWUPPER_STD_A = 0x6B6 - SYS___UMOUNT_A = 0x6F2 - SYS___VFPRINTF_A = 0x6FC - SYS___VPRINTF_A = 0x6FD - SYS___VSPRINTF_A = 0x6FE - SYS___VSWPRINTF_A = 0x6FF - SYS___WCSCOLL_A = 0x6A6 - SYS___WCSCOLL_C_A = 0x6A7 - SYS___WCSCOLL_STD_A = 0x6A8 - SYS___WCSFTIME_A = 0x6D0 - SYS___WCSFTIME_STD_A = 0x6D1 - SYS___WCSXFRM_A = 0x6A9 - SYS___WCSXFRM_C_A = 0x6AA - SYS___WCSXFRM_STD_A = 0x6AB - SYS___WCTYPE_A = 0x6AD - SYS___W_GETMNTENT_A = 0x6F5 - SYS_____CCSIDTYPE_A = 0x6E6 - SYS_____CHATTR_A = 0x6E2 - SYS_____CSNAMETYPE_A = 0x6E7 - SYS_____OPEN_STAT_A = 0x6ED - SYS_____SPAWN2_A = 0x6D2 - SYS_____SPAWNP2_A = 0x6D3 - SYS_____TOCCSID_A = 0x6E4 - SYS_____TOCSNAME_A = 0x6E5 - SYS_ACL_FREE = 0x7FF - SYS_ACL_INIT = 0x7FE - SYS_FWIDE = 0x7DF - SYS_FWPRINTF = 0x7D1 - SYS_FWRITE = 0x07E - SYS_FWSCANF = 0x7D5 - SYS_GETCHAR = 0x07B - SYS_GETS = 0x07C - SYS_M_CREATE_LAYOUT = 0x7C9 - SYS_M_DESTROY_LAYOUT = 0x7CA - SYS_M_GETVALUES_LAYOUT = 0x7CB - SYS_M_SETVALUES_LAYOUT = 0x7CC - SYS_M_TRANSFORM_LAYOUT = 0x7CD - SYS_M_WTRANSFORM_LAYOUT = 0x7CE - SYS_PREAD = 0x7C7 - SYS_PUTC = 0x07D - SYS_PUTCHAR = 0x07A - SYS_PUTS = 0x07F - SYS_PWRITE = 0x7C8 - SYS_TOWCTRAN = 0x7D8 - SYS_TOWCTRANS = 0x7D8 - SYS_UNATEXIT = 0x7B5 - SYS_VFWPRINT = 0x7D3 - SYS_VFWPRINTF = 0x7D3 - SYS_VWPRINTF = 0x7D4 - SYS_WCTRANS = 0x7D7 - SYS_WPRINTF = 0x7D2 - SYS_WSCANF = 0x7D6 - SYS___ASCTIME_R_A = 0x7A1 - SYS___BASENAME_A = 0x7DC - SYS___BTOWC_A = 0x7E4 - SYS___CDUMP_A = 0x7B7 - SYS___CEE3DMP_A = 0x7B6 - SYS___CEILF_H = 0x7F4 - SYS___CEILL_H = 0x7F5 - SYS___CEIL_H = 0x7EA - SYS___CRYPT_A = 0x7BE - SYS___CSNAP_A = 0x7B8 - SYS___CTEST_A = 0x7B9 - SYS___CTIME_R_A = 0x7A2 - SYS___CTRACE_A = 0x7BA - SYS___DBM_OPEN_A = 0x7E6 - SYS___DIRNAME_A = 0x7DD - SYS___FABSF_H = 0x7FA - SYS___FABSL_H = 0x7FB - SYS___FABS_H = 0x7ED - SYS___FGETWC_A = 0x7AA - SYS___FGETWS_A = 0x7AD - SYS___FLOORF_H = 0x7F6 - SYS___FLOORL_H = 0x7F7 - SYS___FLOOR_H = 0x7EB - SYS___FPUTWC_A = 0x7A5 - SYS___FPUTWS_A = 0x7A8 - SYS___GETTIMEOFDAY_A = 0x7AE - SYS___GETWCHAR_A = 0x7AC - SYS___GETWC_A = 0x7AB - SYS___GLOB_A = 0x7DE - SYS___GMTIME_A = 0x7AF - SYS___GMTIME_R_A = 0x7B0 - SYS___INET_PTON_A = 0x7BC - SYS___J0_H = 0x7EE - SYS___J1_H = 0x7EF - SYS___JN_H = 0x7F0 - SYS___LOCALTIME_A = 0x7B1 - SYS___LOCALTIME_R_A = 0x7B2 - SYS___MALLOC24 = 0x7FC - SYS___MALLOC31 = 0x7FD - SYS___MKTIME_A = 0x7B3 - SYS___MODFF_H = 0x7F8 - SYS___MODFL_H = 0x7F9 - SYS___MODF_H = 0x7EC - SYS___OPENDIR_A = 0x7C2 - SYS___OSNAME = 0x7E0 - SYS___PUTWCHAR_A = 0x7A7 - SYS___PUTWC_A = 0x7A6 - SYS___READDIR_A = 0x7C3 - SYS___STRTOLL_A = 0x7A3 - SYS___STRTOULL_A = 0x7A4 - SYS___SYSLOG_A = 0x7BD - SYS___TZZNA = 0x7B4 - SYS___UNGETWC_A = 0x7A9 - SYS___UTIME_A = 0x7A0 - SYS___VFPRINTF2_A = 0x7E7 - SYS___VPRINTF2_A = 0x7E8 - SYS___VSPRINTF2_A = 0x7E9 - SYS___VSWPRNTF2_A = 0x7BB - SYS___WCSTOD_A = 0x7D9 - SYS___WCSTOL_A = 0x7DA - SYS___WCSTOUL_A = 0x7DB - SYS___WCTOB_A = 0x7E5 - SYS___Y0_H = 0x7F1 - SYS___Y1_H = 0x7F2 - SYS___YN_H = 0x7F3 - SYS_____OPENDIR2_A = 0x7BF - SYS_____OSNAME_A = 0x7E1 - SYS_____READDIR2_A = 0x7C0 - SYS_DLCLOSE = 0x8DF - SYS_DLERROR = 0x8E0 - SYS_DLOPEN = 0x8DD - SYS_DLSYM = 0x8DE - SYS_FLOCKFILE 
= 0x8D3 - SYS_FTRYLOCKFILE = 0x8D4 - SYS_FUNLOCKFILE = 0x8D5 - SYS_GETCHAR_UNLOCKED = 0x8D7 - SYS_GETC_UNLOCKED = 0x8D6 - SYS_PUTCHAR_UNLOCKED = 0x8D9 - SYS_PUTC_UNLOCKED = 0x8D8 - SYS_SNPRINTF = 0x8DA - SYS_VSNPRINTF = 0x8DB - SYS_WCSCSPN = 0x08B - SYS_WCSLEN = 0x08C - SYS_WCSNCAT = 0x08D - SYS_WCSNCMP = 0x08A - SYS_WCSNCPY = 0x08F - SYS_WCSSPN = 0x08E - SYS___ABSF_H = 0x8E7 - SYS___ABSL_H = 0x8E8 - SYS___ABS_H = 0x8E6 - SYS___ACOSF_H = 0x8EA - SYS___ACOSH_H = 0x8EC - SYS___ACOSL_H = 0x8EB - SYS___ACOS_H = 0x8E9 - SYS___ASINF_H = 0x8EE - SYS___ASINH_H = 0x8F0 - SYS___ASINL_H = 0x8EF - SYS___ASIN_H = 0x8ED - SYS___ATAN2F_H = 0x8F8 - SYS___ATAN2L_H = 0x8F9 - SYS___ATAN2_H = 0x8F7 - SYS___ATANF_H = 0x8F2 - SYS___ATANHF_H = 0x8F5 - SYS___ATANHL_H = 0x8F6 - SYS___ATANH_H = 0x8F4 - SYS___ATANL_H = 0x8F3 - SYS___ATAN_H = 0x8F1 - SYS___CBRT_H = 0x8FA - SYS___COPYSIGNF_H = 0x8FB - SYS___COPYSIGNL_H = 0x8FC - SYS___COSF_H = 0x8FE - SYS___COSL_H = 0x8FF - SYS___COS_H = 0x8FD - SYS___DLERROR_A = 0x8D2 - SYS___DLOPEN_A = 0x8D0 - SYS___DLSYM_A = 0x8D1 - SYS___GETUTXENT_A = 0x8C6 - SYS___GETUTXID_A = 0x8C7 - SYS___GETUTXLINE_A = 0x8C8 - SYS___ITOA = 0x8AA - SYS___ITOA_A = 0x8B0 - SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 - SYS___LE_MSG_ADD_INSERT = 0x8A6 - SYS___LE_MSG_GET = 0x8A7 - SYS___LE_MSG_GET_AND_WRITE = 0x8A8 - SYS___LE_MSG_WRITE = 0x8A9 - SYS___LLTOA = 0x8AE - SYS___LLTOA_A = 0x8B4 - SYS___LTOA = 0x8AC - SYS___LTOA_A = 0x8B2 - SYS___PUTCHAR_UNLOCKED_A = 0x8CC - SYS___PUTC_UNLOCKED_A = 0x8CB - SYS___PUTUTXLINE_A = 0x8C9 - SYS___RESET_EXCEPTION_HANDLER = 0x8E3 - SYS___REXEC_A = 0x8C4 - SYS___REXEC_AF_A = 0x8C5 - SYS___SET_EXCEPTION_HANDLER = 0x8E2 - SYS___SNPRINTF_A = 0x8CD - SYS___SUPERKILL = 0x8A4 - SYS___TCGETATTR_A = 0x8A1 - SYS___TCSETATTR_A = 0x8A2 - SYS___ULLTOA = 0x8AF - SYS___ULLTOA_A = 0x8B5 - SYS___ULTOA = 0x8AD - SYS___ULTOA_A = 0x8B3 - SYS___UTOA = 0x8AB - SYS___UTOA_A = 0x8B1 - SYS___VHM_EVENT = 0x8E4 - SYS___VSNPRINTF_A = 0x8CE - SYS_____GETENV_A = 0x8C3 - SYS_____UTMPXNAME_A = 0x8CA - SYS_CACOSH = 0x9A0 - SYS_CACOSHF = 0x9A3 - SYS_CACOSHL = 0x9A6 - SYS_CARG = 0x9A9 - SYS_CARGF = 0x9AC - SYS_CARGL = 0x9AF - SYS_CASIN = 0x9B2 - SYS_CASINF = 0x9B5 - SYS_CASINH = 0x9BB - SYS_CASINHF = 0x9BE - SYS_CASINHL = 0x9C1 - SYS_CASINL = 0x9B8 - SYS_CATAN = 0x9C4 - SYS_CATANF = 0x9C7 - SYS_CATANH = 0x9CD - SYS_CATANHF = 0x9D0 - SYS_CATANHL = 0x9D3 - SYS_CATANL = 0x9CA - SYS_CCOS = 0x9D6 - SYS_CCOSF = 0x9D9 - SYS_CCOSH = 0x9DF - SYS_CCOSHF = 0x9E2 - SYS_CCOSHL = 0x9E5 - SYS_CCOSL = 0x9DC - SYS_CEXP = 0x9E8 - SYS_CEXPF = 0x9EB - SYS_CEXPL = 0x9EE - SYS_CIMAG = 0x9F1 - SYS_CIMAGF = 0x9F4 - SYS_CIMAGL = 0x9F7 - SYS_CLOGF = 0x9FD - SYS_MEMCHR = 0x09B - SYS_MEMCMP = 0x09A - SYS_STRCOLL = 0x09C - SYS_STRNCMP = 0x09D - SYS_STRRCHR = 0x09F - SYS_STRXFRM = 0x09E - SYS___CACOSHF_B = 0x9A4 - SYS___CACOSHF_H = 0x9A5 - SYS___CACOSHL_B = 0x9A7 - SYS___CACOSHL_H = 0x9A8 - SYS___CACOSH_B = 0x9A1 - SYS___CACOSH_H = 0x9A2 - SYS___CARGF_B = 0x9AD - SYS___CARGF_H = 0x9AE - SYS___CARGL_B = 0x9B0 - SYS___CARGL_H = 0x9B1 - SYS___CARG_B = 0x9AA - SYS___CARG_H = 0x9AB - SYS___CASINF_B = 0x9B6 - SYS___CASINF_H = 0x9B7 - SYS___CASINHF_B = 0x9BF - SYS___CASINHF_H = 0x9C0 - SYS___CASINHL_B = 0x9C2 - SYS___CASINHL_H = 0x9C3 - SYS___CASINH_B = 0x9BC - SYS___CASINH_H = 0x9BD - SYS___CASINL_B = 0x9B9 - SYS___CASINL_H = 0x9BA - SYS___CASIN_B = 0x9B3 - SYS___CASIN_H = 0x9B4 - SYS___CATANF_B = 0x9C8 - SYS___CATANF_H = 0x9C9 - SYS___CATANHF_B = 0x9D1 - SYS___CATANHF_H = 0x9D2 - SYS___CATANHL_B = 0x9D4 - SYS___CATANHL_H = 0x9D5 - 
SYS___CATANH_B = 0x9CE - SYS___CATANH_H = 0x9CF - SYS___CATANL_B = 0x9CB - SYS___CATANL_H = 0x9CC - SYS___CATAN_B = 0x9C5 - SYS___CATAN_H = 0x9C6 - SYS___CCOSF_B = 0x9DA - SYS___CCOSF_H = 0x9DB - SYS___CCOSHF_B = 0x9E3 - SYS___CCOSHF_H = 0x9E4 - SYS___CCOSHL_B = 0x9E6 - SYS___CCOSHL_H = 0x9E7 - SYS___CCOSH_B = 0x9E0 - SYS___CCOSH_H = 0x9E1 - SYS___CCOSL_B = 0x9DD - SYS___CCOSL_H = 0x9DE - SYS___CCOS_B = 0x9D7 - SYS___CCOS_H = 0x9D8 - SYS___CEXPF_B = 0x9EC - SYS___CEXPF_H = 0x9ED - SYS___CEXPL_B = 0x9EF - SYS___CEXPL_H = 0x9F0 - SYS___CEXP_B = 0x9E9 - SYS___CEXP_H = 0x9EA - SYS___CIMAGF_B = 0x9F5 - SYS___CIMAGF_H = 0x9F6 - SYS___CIMAGL_B = 0x9F8 - SYS___CIMAGL_H = 0x9F9 - SYS___CIMAG_B = 0x9F2 - SYS___CIMAG_H = 0x9F3 - SYS___CLOG = 0x9FA - SYS___CLOGF_B = 0x9FE - SYS___CLOGF_H = 0x9FF - SYS___CLOG_B = 0x9FB - SYS___CLOG_H = 0x9FC - SYS_ISWCTYPE = 0x10C - SYS_ISWXDIGI = 0x10A - SYS_ISWXDIGIT = 0x10A - SYS_MBSINIT = 0x10F - SYS_TOWLOWER = 0x10D - SYS_TOWUPPER = 0x10E - SYS_WCTYPE = 0x10B - SYS_WCSSTR = 0x11B - SYS___RPMTCH = 0x11A - SYS_WCSTOD = 0x12E - SYS_WCSTOK = 0x12C - SYS_WCSTOL = 0x12D - SYS_WCSTOUL = 0x12F - SYS_FGETWC = 0x13C - SYS_FGETWS = 0x13D - SYS_FPUTWC = 0x13E - SYS_FPUTWS = 0x13F - SYS_REGERROR = 0x13B - SYS_REGFREE = 0x13A - SYS_COLLEQUIV = 0x14F - SYS_COLLTOSTR = 0x14E - SYS_ISMCCOLLEL = 0x14C - SYS_STRTOCOLL = 0x14D - SYS_DLLFREE = 0x16F - SYS_DLLQUERYFN = 0x16D - SYS_DLLQUERYVAR = 0x16E - SYS_GETMCCOLL = 0x16A - SYS_GETWMCCOLL = 0x16B - SYS___ERR2AD = 0x16C - SYS_CFSETOSPEED = 0x17A - SYS_CHDIR = 0x17B - SYS_CHMOD = 0x17C - SYS_CHOWN = 0x17D - SYS_CLOSE = 0x17E - SYS_CLOSEDIR = 0x17F - SYS_LOG = 0x017 - SYS_COSH = 0x018 - SYS_FCHMOD = 0x18A - SYS_FCHOWN = 0x18B - SYS_FCNTL = 0x18C - SYS_FILENO = 0x18D - SYS_FORK = 0x18E - SYS_FPATHCONF = 0x18F - SYS_GETLOGIN = 0x19A - SYS_GETPGRP = 0x19C - SYS_GETPID = 0x19D - SYS_GETPPID = 0x19E - SYS_GETPWNAM = 0x19F - SYS_TANH = 0x019 - SYS_W_GETMNTENT = 0x19B - SYS_POW = 0x020 - SYS_PTHREAD_SELF = 0x20A - SYS_PTHREAD_SETINTR = 0x20B - SYS_PTHREAD_SETINTRTYPE = 0x20C - SYS_PTHREAD_SETSPECIFIC = 0x20D - SYS_PTHREAD_TESTINTR = 0x20E - SYS_PTHREAD_YIELD = 0x20F - SYS_SQRT = 0x021 - SYS_FLOOR = 0x022 - SYS_J1 = 0x023 - SYS_WCSPBRK = 0x23F - SYS_BSEARCH = 0x24C - SYS_FABS = 0x024 - SYS_GETENV = 0x24A - SYS_LDIV = 0x24D - SYS_SYSTEM = 0x24B - SYS_FMOD = 0x025 - SYS___RETHROW = 0x25F - SYS___THROW = 0x25E - SYS_J0 = 0x026 - SYS_PUTENV = 0x26A - SYS___GETENV = 0x26F - SYS_SEMCTL = 0x27A - SYS_SEMGET = 0x27B - SYS_SEMOP = 0x27C - SYS_SHMAT = 0x27D - SYS_SHMCTL = 0x27E - SYS_SHMDT = 0x27F - SYS_YN = 0x027 - SYS_JN = 0x028 - SYS_SIGALTSTACK = 0x28A - SYS_SIGHOLD = 0x28B - SYS_SIGIGNORE = 0x28C - SYS_SIGINTERRUPT = 0x28D - SYS_SIGPAUSE = 0x28E - SYS_SIGRELSE = 0x28F - SYS_GETOPT = 0x29A - SYS_GETSUBOPT = 0x29D - SYS_LCHOWN = 0x29B - SYS_SETPGRP = 0x29E - SYS_TRUNCATE = 0x29C - SYS_Y0 = 0x029 - SYS___GDERR = 0x29F - SYS_ISALPHA = 0x030 - SYS_VFORK = 0x30F - SYS__LONGJMP = 0x30D - SYS__SETJMP = 0x30E - SYS_GLOB = 0x31A - SYS_GLOBFREE = 0x31B - SYS_ISALNUM = 0x031 - SYS_PUTW = 0x31C - SYS_SEEKDIR = 0x31D - SYS_TELLDIR = 0x31E - SYS_TEMPNAM = 0x31F - SYS_GETTIMEOFDAY_R = 0x32E - SYS_ISLOWER = 0x032 - SYS_LGAMMA = 0x32C - SYS_REMAINDER = 0x32A - SYS_SCALB = 0x32B - SYS_SYNC = 0x32F - SYS_TTYSLOT = 0x32D - SYS_ENDPROTOENT = 0x33A - SYS_ENDSERVENT = 0x33B - SYS_GETHOSTBYADDR = 0x33D - SYS_GETHOSTBYADDR_R = 0x33C - SYS_GETHOSTBYNAME = 0x33F - SYS_GETHOSTBYNAME_R = 0x33E - SYS_ISCNTRL = 0x033 - SYS_GETSERVBYNAME = 0x34A - SYS_GETSERVBYPORT = 0x34B - 
SYS_GETSERVENT = 0x34C - SYS_GETSOCKNAME = 0x34D - SYS_GETSOCKOPT = 0x34E - SYS_INET_ADDR = 0x34F - SYS_ISDIGIT = 0x034 - SYS_ISGRAPH = 0x035 - SYS_SELECT = 0x35B - SYS_SELECTEX = 0x35C - SYS_SEND = 0x35D - SYS_SENDTO = 0x35F - SYS_CHROOT = 0x36A - SYS_ISNAN = 0x36D - SYS_ISUPPER = 0x036 - SYS_ULIMIT = 0x36C - SYS_UTIMES = 0x36E - SYS_W_STATVFS = 0x36B - SYS___H_ERRNO = 0x36F - SYS_GRANTPT = 0x37A - SYS_ISPRINT = 0x037 - SYS_TCGETSID = 0x37C - SYS_UNLOCKPT = 0x37B - SYS___TCGETCP = 0x37D - SYS___TCSETCP = 0x37E - SYS___TCSETTABLES = 0x37F - SYS_ISPUNCT = 0x038 - SYS_NLIST = 0x38C - SYS___IPDBCS = 0x38D - SYS___IPDSPX = 0x38E - SYS___IPMSGC = 0x38F - SYS___STHOSTENT = 0x38B - SYS___STSERVENT = 0x38A - SYS_ISSPACE = 0x039 - SYS_COS = 0x040 - SYS_T_ALLOC = 0x40A - SYS_T_BIND = 0x40B - SYS_T_CLOSE = 0x40C - SYS_T_CONNECT = 0x40D - SYS_T_ERROR = 0x40E - SYS_T_FREE = 0x40F - SYS_TAN = 0x041 - SYS_T_RCVREL = 0x41A - SYS_T_RCVUDATA = 0x41B - SYS_T_RCVUDERR = 0x41C - SYS_T_SND = 0x41D - SYS_T_SNDDIS = 0x41E - SYS_T_SNDREL = 0x41F - SYS_GETPMSG = 0x42A - SYS_ISASTREAM = 0x42B - SYS_PUTMSG = 0x42C - SYS_PUTPMSG = 0x42D - SYS_SINH = 0x042 - SYS___ISPOSIXON = 0x42E - SYS___OPENMVSREL = 0x42F - SYS_ACOS = 0x043 - SYS_ATAN = 0x044 - SYS_ATAN2 = 0x045 - SYS_FTELL = 0x046 - SYS_FGETPOS = 0x047 - SYS_SOCK_DEBUG = 0x47A - SYS_SOCK_DO_TESTSTOR = 0x47D - SYS_TAKESOCKET = 0x47E - SYS___SERVER_INIT = 0x47F - SYS_FSEEK = 0x048 - SYS___IPHOST = 0x48B - SYS___IPNODE = 0x48C - SYS___SERVER_CLASSIFY_CREATE = 0x48D - SYS___SERVER_CLASSIFY_DESTROY = 0x48E - SYS___SERVER_CLASSIFY_RESET = 0x48F - SYS___SMF_RECORD = 0x48A - SYS_FSETPOS = 0x049 - SYS___FNWSA = 0x49B - SYS___SPAWN2 = 0x49D - SYS___SPAWNP2 = 0x49E - SYS_ATOF = 0x050 - SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A - SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B - SYS_PTHREAD_RWLOCK_DESTROY = 0x50C - SYS_PTHREAD_RWLOCK_INIT = 0x50D - SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E - SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F - SYS_ATOI = 0x051 - SYS___FP_CLASS = 0x51D - SYS___FP_CLR_FLAG = 0x51A - SYS___FP_FINITE = 0x51E - SYS___FP_ISNAN = 0x51F - SYS___FP_RAISE_XCP = 0x51C - SYS___FP_READ_FLAG = 0x51B - SYS_RAND = 0x052 - SYS_SIGTIMEDWAIT = 0x52D - SYS_SIGWAITINFO = 0x52E - SYS___CHKBFP = 0x52F - SYS___FPC_RS = 0x52C - SYS___FPC_RW = 0x52A - SYS___FPC_SM = 0x52B - SYS_STRTOD = 0x053 - SYS_STRTOL = 0x054 - SYS_STRTOUL = 0x055 - SYS_MALLOC = 0x056 - SYS_SRAND = 0x057 - SYS_CALLOC = 0x058 - SYS_FREE = 0x059 - SYS___OSENV = 0x59F - SYS___W_PIOCTL = 0x59E - SYS_LONGJMP = 0x060 - SYS___FLOORF_B = 0x60A - SYS___FLOORL_B = 0x60B - SYS___FREXPF_B = 0x60C - SYS___FREXPL_B = 0x60D - SYS___LDEXPF_B = 0x60E - SYS___LDEXPL_B = 0x60F - SYS_SIGNAL = 0x061 - SYS___ATAN2F_B = 0x61A - SYS___ATAN2L_B = 0x61B - SYS___COSHF_B = 0x61C - SYS___COSHL_B = 0x61D - SYS___EXPF_B = 0x61E - SYS___EXPL_B = 0x61F - SYS_TMPNAM = 0x062 - SYS___ABSF_B = 0x62A - SYS___ABSL_B = 0x62C - SYS___ABS_B = 0x62B - SYS___FMODF_B = 0x62D - SYS___FMODL_B = 0x62E - SYS___MODFF_B = 0x62F - SYS_ATANL = 0x63A - SYS_CEILF = 0x63B - SYS_CEILL = 0x63C - SYS_COSF = 0x63D - SYS_COSHF = 0x63F - SYS_COSL = 0x63E - SYS_REMOVE = 0x063 - SYS_POWL = 0x64A - SYS_RENAME = 0x064 - SYS_SINF = 0x64B - SYS_SINHF = 0x64F - SYS_SINL = 0x64C - SYS_SQRTF = 0x64D - SYS_SQRTL = 0x64E - SYS_BTOWC = 0x65F - SYS_FREXPL = 0x65A - SYS_LDEXPF = 0x65B - SYS_LDEXPL = 0x65C - SYS_MODFF = 0x65D - SYS_MODFL = 0x65E - SYS_TMPFILE = 0x065 - SYS_FREOPEN = 0x066 - SYS___CHARMAP_INIT_A = 0x66E - SYS___GETHOSTBYADDR_R_A = 0x66C - SYS___GETHOSTBYNAME_A = 0x66A - 
SYS___GETHOSTBYNAME_R_A = 0x66D - SYS___MBLEN_A = 0x66F - SYS___RES_INIT_A = 0x66B - SYS_FCLOSE = 0x067 - SYS___GETGRGID_R_A = 0x67D - SYS___WCSTOMBS_A = 0x67A - SYS___WCSTOMBS_STD_A = 0x67B - SYS___WCSWIDTH_A = 0x67C - SYS___WCSWIDTH_ASIA = 0x67F - SYS___WCSWIDTH_STD_A = 0x67E - SYS_FFLUSH = 0x068 - SYS___GETLOGIN_R_A = 0x68E - SYS___GETPWNAM_R_A = 0x68C - SYS___GETPWUID_R_A = 0x68D - SYS___TTYNAME_R_A = 0x68F - SYS___WCWIDTH_ASIA = 0x68B - SYS___WCWIDTH_STD_A = 0x68A - SYS_FOPEN = 0x069 - SYS___REGEXEC_A = 0x69A - SYS___REGEXEC_STD_A = 0x69B - SYS___REGFREE_A = 0x69C - SYS___REGFREE_STD_A = 0x69D - SYS___STRCOLL_A = 0x69E - SYS___STRCOLL_C_A = 0x69F - SYS_SCANF = 0x070 - SYS___A64L_A = 0x70C - SYS___ECVT_A = 0x70D - SYS___FCVT_A = 0x70E - SYS___GCVT_A = 0x70F - SYS___STRTOUL_A = 0x70A - SYS_____AE_CORRESTBL_QUERY_A = 0x70B - SYS_SPRINTF = 0x071 - SYS___ACCESS_A = 0x71F - SYS___CATOPEN_A = 0x71E - SYS___GETOPT_A = 0x71D - SYS___REALPATH_A = 0x71A - SYS___SETENV_A = 0x71B - SYS___SYSTEM_A = 0x71C - SYS_FGETC = 0x072 - SYS___GAI_STRERROR_A = 0x72F - SYS___RMDIR_A = 0x72A - SYS___STATVFS_A = 0x72B - SYS___SYMLINK_A = 0x72C - SYS___TRUNCATE_A = 0x72D - SYS___UNLINK_A = 0x72E - SYS_VFPRINTF = 0x073 - SYS___ISSPACE_A = 0x73A - SYS___ISUPPER_A = 0x73B - SYS___ISWALNUM_A = 0x73F - SYS___ISXDIGIT_A = 0x73C - SYS___TOLOWER_A = 0x73D - SYS___TOUPPER_A = 0x73E - SYS_VPRINTF = 0x074 - SYS___CONFSTR_A = 0x74B - SYS___FDOPEN_A = 0x74E - SYS___FLDATA_A = 0x74F - SYS___FTOK_A = 0x74C - SYS___ISWXDIGIT_A = 0x74A - SYS___MKTEMP_A = 0x74D - SYS_VSPRINTF = 0x075 - SYS___GETGRGID_A = 0x75A - SYS___GETGRNAM_A = 0x75B - SYS___GETGROUPSBYNAME_A = 0x75C - SYS___GETHOSTENT_A = 0x75D - SYS___GETHOSTNAME_A = 0x75E - SYS___GETLOGIN_A = 0x75F - SYS_GETC = 0x076 - SYS___CREATEWORKUNIT_A = 0x76A - SYS___CTERMID_A = 0x76B - SYS___FMTMSG_A = 0x76C - SYS___INITGROUPS_A = 0x76D - SYS___MSGRCV_A = 0x76F - SYS_____LOGIN_A = 0x76E - SYS_FGETS = 0x077 - SYS___STRCASECMP_A = 0x77B - SYS___STRNCASECMP_A = 0x77C - SYS___TTYNAME_A = 0x77D - SYS___UNAME_A = 0x77E - SYS___UTIMES_A = 0x77F - SYS_____SERVER_PWU_A = 0x77A - SYS_FPUTC = 0x078 - SYS___CREAT_O_A = 0x78E - SYS___ENVNA = 0x78F - SYS___FREAD_A = 0x78A - SYS___FWRITE_A = 0x78B - SYS___ISASCII = 0x78D - SYS___OPEN_O_A = 0x78C - SYS_FPUTS = 0x079 - SYS___ASCTIME_A = 0x79C - SYS___CTIME_A = 0x79D - SYS___GETDATE_A = 0x79E - SYS___GETSERVBYPORT_A = 0x79A - SYS___GETSERVENT_A = 0x79B - SYS___TZSET_A = 0x79F - SYS_ACL_FROM_TEXT = 0x80C - SYS_ACL_SET_FD = 0x80A - SYS_ACL_SET_FILE = 0x80B - SYS_ACL_SORT = 0x80E - SYS_ACL_TO_TEXT = 0x80D - SYS_UNGETC = 0x080 - SYS___SHUTDOWN_REGISTRATION = 0x80F - SYS_FREAD = 0x081 - SYS_FREEADDRINFO = 0x81A - SYS_GAI_STRERROR = 0x81B - SYS_REXEC_AF = 0x81C - SYS___DYNALLOC_A = 0x81F - SYS___POE = 0x81D - SYS_WCSTOMBS = 0x082 - SYS___INET_ADDR_A = 0x82F - SYS___NLIST_A = 0x82A - SYS_____TCGETCP_A = 0x82B - SYS_____TCSETCP_A = 0x82C - SYS_____W_PIOCTL_A = 0x82E - SYS_MBTOWC = 0x083 - SYS___CABEND = 0x83D - SYS___LE_CIB_GET = 0x83E - SYS___RECVMSG_A = 0x83B - SYS___SENDMSG_A = 0x83A - SYS___SET_LAA_FOR_JIT = 0x83F - SYS_____LCHATTR_A = 0x83C - SYS_WCTOMB = 0x084 - SYS___CBRTL_B = 0x84A - SYS___COPYSIGNF_B = 0x84B - SYS___COPYSIGNL_B = 0x84C - SYS___COTANF_B = 0x84D - SYS___COTANL_B = 0x84F - SYS___COTAN_B = 0x84E - SYS_MBSTOWCS = 0x085 - SYS___LOG1PL_B = 0x85A - SYS___LOG2F_B = 0x85B - SYS___LOG2L_B = 0x85D - SYS___LOG2_B = 0x85C - SYS___REMAINDERF_B = 0x85E - SYS___REMAINDERL_B = 0x85F - SYS_ACOSHF = 0x86E - SYS_ACOSHL = 0x86F - SYS_WCSCPY = 0x086 - 
SYS___ERFCF_B = 0x86D - SYS___ERFF_B = 0x86C - SYS___LROUNDF_B = 0x86A - SYS___LROUND_B = 0x86B - SYS_COTANL = 0x87A - SYS_EXP2F = 0x87B - SYS_EXP2L = 0x87C - SYS_EXPM1F = 0x87D - SYS_EXPM1L = 0x87E - SYS_FDIMF = 0x87F - SYS_WCSCAT = 0x087 - SYS___COTANL = 0x87A - SYS_REMAINDERF = 0x88A - SYS_REMAINDERL = 0x88B - SYS_REMAINDF = 0x88A - SYS_REMAINDL = 0x88B - SYS_REMQUO = 0x88D - SYS_REMQUOF = 0x88C - SYS_REMQUOL = 0x88E - SYS_TGAMMAF = 0x88F - SYS_WCSCHR = 0x088 - SYS_ERFCF = 0x89B - SYS_ERFCL = 0x89C - SYS_ERFL = 0x89A - SYS_EXP2 = 0x89E - SYS_WCSCMP = 0x089 - SYS___EXP2_B = 0x89D - SYS___FAR_JUMP = 0x89F - SYS_ABS = 0x090 - SYS___ERFCL_H = 0x90A - SYS___EXPF_H = 0x90C - SYS___EXPL_H = 0x90D - SYS___EXPM1_H = 0x90E - SYS___EXP_H = 0x90B - SYS___FDIM_H = 0x90F - SYS_DIV = 0x091 - SYS___LOG2F_H = 0x91F - SYS___LOG2_H = 0x91E - SYS___LOGB_H = 0x91D - SYS___LOGF_H = 0x91B - SYS___LOGL_H = 0x91C - SYS___LOG_H = 0x91A - SYS_LABS = 0x092 - SYS___POWL_H = 0x92A - SYS___REMAINDER_H = 0x92B - SYS___RINT_H = 0x92C - SYS___SCALB_H = 0x92D - SYS___SINF_H = 0x92F - SYS___SIN_H = 0x92E - SYS_STRNCPY = 0x093 - SYS___TANHF_H = 0x93B - SYS___TANHL_H = 0x93C - SYS___TANH_H = 0x93A - SYS___TGAMMAF_H = 0x93E - SYS___TGAMMA_H = 0x93D - SYS___TRUNC_H = 0x93F - SYS_MEMCPY = 0x094 - SYS_VFWSCANF = 0x94A - SYS_VSWSCANF = 0x94E - SYS_VWSCANF = 0x94C - SYS_INET6_RTH_ADD = 0x95D - SYS_INET6_RTH_INIT = 0x95C - SYS_INET6_RTH_REVERSE = 0x95E - SYS_INET6_RTH_SEGMENTS = 0x95F - SYS_INET6_RTH_SPACE = 0x95B - SYS_MEMMOVE = 0x095 - SYS_WCSTOLD = 0x95A - SYS_STRCPY = 0x096 - SYS_STRCMP = 0x097 - SYS_CABS = 0x98E - SYS_STRCAT = 0x098 - SYS___CABS_B = 0x98F - SYS___POW_II = 0x98A - SYS___POW_II_B = 0x98B - SYS___POW_II_H = 0x98C - SYS_CACOSF = 0x99A - SYS_CACOSL = 0x99D - SYS_STRNCAT = 0x099 - SYS___CACOSF_B = 0x99B - SYS___CACOSF_H = 0x99C - SYS___CACOSL_B = 0x99E - SYS___CACOSL_H = 0x99F - SYS_ISWALPHA = 0x100 - SYS_ISWBLANK = 0x101 - SYS___ISWBLK = 0x101 - SYS_ISWCNTRL = 0x102 - SYS_ISWDIGIT = 0x103 - SYS_ISWGRAPH = 0x104 - SYS_ISWLOWER = 0x105 - SYS_ISWPRINT = 0x106 - SYS_ISWPUNCT = 0x107 - SYS_ISWSPACE = 0x108 - SYS_ISWUPPER = 0x109 - SYS_WCTOB = 0x110 - SYS_MBRLEN = 0x111 - SYS_MBRTOWC = 0x112 - SYS_MBSRTOWC = 0x113 - SYS_MBSRTOWCS = 0x113 - SYS_WCRTOMB = 0x114 - SYS_WCSRTOMB = 0x115 - SYS_WCSRTOMBS = 0x115 - SYS___CSID = 0x116 - SYS___WCSID = 0x117 - SYS_STRPTIME = 0x118 - SYS___STRPTM = 0x118 - SYS_STRFMON = 0x119 - SYS_WCSCOLL = 0x130 - SYS_WCSXFRM = 0x131 - SYS_WCSWIDTH = 0x132 - SYS_WCWIDTH = 0x133 - SYS_WCSFTIME = 0x134 - SYS_SWPRINTF = 0x135 - SYS_VSWPRINT = 0x136 - SYS_VSWPRINTF = 0x136 - SYS_SWSCANF = 0x137 - SYS_REGCOMP = 0x138 - SYS_REGEXEC = 0x139 - SYS_GETWC = 0x140 - SYS_GETWCHAR = 0x141 - SYS_PUTWC = 0x142 - SYS_PUTWCHAR = 0x143 - SYS_UNGETWC = 0x144 - SYS_ICONV_OPEN = 0x145 - SYS_ICONV = 0x146 - SYS_ICONV_CLOSE = 0x147 - SYS_COLLRANGE = 0x150 - SYS_CCLASS = 0x151 - SYS_COLLORDER = 0x152 - SYS___DEMANGLE = 0x154 - SYS_FDOPEN = 0x155 - SYS___ERRNO = 0x156 - SYS___ERRNO2 = 0x157 - SYS___TERROR = 0x158 - SYS_MAXCOLL = 0x169 - SYS_DLLLOAD = 0x170 - SYS__EXIT = 0x174 - SYS_ACCESS = 0x175 - SYS_ALARM = 0x176 - SYS_CFGETISPEED = 0x177 - SYS_CFGETOSPEED = 0x178 - SYS_CFSETISPEED = 0x179 - SYS_CREAT = 0x180 - SYS_CTERMID = 0x181 - SYS_DUP = 0x182 - SYS_DUP2 = 0x183 - SYS_EXECL = 0x184 - SYS_EXECLE = 0x185 - SYS_EXECLP = 0x186 - SYS_EXECV = 0x187 - SYS_EXECVE = 0x188 - SYS_EXECVP = 0x189 - SYS_FSTAT = 0x190 - SYS_FSYNC = 0x191 - SYS_FTRUNCATE = 0x192 - SYS_GETCWD = 0x193 - SYS_GETEGID = 0x194 - SYS_GETEUID = 0x195 - 
[regenerated syscall-number table for z/OS s390x (golang.org/x/sys): the previous SYS_* constant block (SYS_GETGID = 0x196 … SYS___CACOS_H = 0x999, without value comments) is removed and re-added as the updated table with decimal-value comments, running from SYS_LOG = 0x17 // 23 through SYS___LGAMMAF_H = 0x919 // 2329 and onward]
+ SYS___LOG_H = 0x91A // 2330 + SYS___LOGF_H = 0x91B // 2331 + SYS___LOGL_H = 0x91C // 2332 + SYS___LOGB_H = 0x91D // 2333 + SYS___LOG2_H = 0x91E // 2334 + SYS___LOG2F_H = 0x91F // 2335 + SYS___LOG2L_H = 0x920 // 2336 + SYS___LOG1P_H = 0x921 // 2337 + SYS___LOG10_H = 0x922 // 2338 + SYS___LOG10F_H = 0x923 // 2339 + SYS___LOG10L_H = 0x924 // 2340 + SYS___LROUND_H = 0x925 // 2341 + SYS___LROUNDF_H = 0x926 // 2342 + SYS___NEXTAFTER_H = 0x927 // 2343 + SYS___POW_H = 0x928 // 2344 + SYS___POWF_H = 0x929 // 2345 + SYS___POWL_H = 0x92A // 2346 + SYS___REMAINDER_H = 0x92B // 2347 + SYS___RINT_H = 0x92C // 2348 + SYS___SCALB_H = 0x92D // 2349 + SYS___SIN_H = 0x92E // 2350 + SYS___SINF_H = 0x92F // 2351 + SYS___SINL_H = 0x930 // 2352 + SYS___SINH_H = 0x931 // 2353 + SYS___SINHF_H = 0x932 // 2354 + SYS___SINHL_H = 0x933 // 2355 + SYS___SQRT_H = 0x934 // 2356 + SYS___SQRTF_H = 0x935 // 2357 + SYS___SQRTL_H = 0x936 // 2358 + SYS___TAN_H = 0x937 // 2359 + SYS___TANF_H = 0x938 // 2360 + SYS___TANL_H = 0x939 // 2361 + SYS___TANH_H = 0x93A // 2362 + SYS___TANHF_H = 0x93B // 2363 + SYS___TANHL_H = 0x93C // 2364 + SYS___TGAMMA_H = 0x93D // 2365 + SYS___TGAMMAF_H = 0x93E // 2366 + SYS___TRUNC_H = 0x93F // 2367 + SYS___TRUNCF_H = 0x940 // 2368 + SYS___TRUNCL_H = 0x941 // 2369 + SYS___COSH_H = 0x942 // 2370 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 // 2371 + SYS_VFSCANF = 0x944 // 2372 + SYS_VSCANF = 0x946 // 2374 + SYS_VSSCANF = 0x948 // 2376 + SYS_VFWSCANF = 0x94A // 2378 + SYS_VWSCANF = 0x94C // 2380 + SYS_VSWSCANF = 0x94E // 2382 + SYS_IMAXABS = 0x950 // 2384 + SYS_IMAXDIV = 0x951 // 2385 + SYS_STRTOIMAX = 0x952 // 2386 + SYS_STRTOUMAX = 0x953 // 2387 + SYS_WCSTOIMAX = 0x954 // 2388 + SYS_WCSTOUMAX = 0x955 // 2389 + SYS_ATOLL = 0x956 // 2390 + SYS_STRTOF = 0x957 // 2391 + SYS_STRTOLD = 0x958 // 2392 + SYS_WCSTOF = 0x959 // 2393 + SYS_WCSTOLD = 0x95A // 2394 + SYS_INET6_RTH_SPACE = 0x95B // 2395 + SYS_INET6_RTH_INIT = 0x95C // 2396 + SYS_INET6_RTH_ADD = 0x95D // 2397 + SYS_INET6_RTH_REVERSE = 0x95E // 2398 + SYS_INET6_RTH_SEGMENTS = 0x95F // 2399 + SYS_INET6_RTH_GETADDR = 0x960 // 2400 + SYS_INET6_OPT_INIT = 0x961 // 2401 + SYS_INET6_OPT_APPEND = 0x962 // 2402 + SYS_INET6_OPT_FINISH = 0x963 // 2403 + SYS_INET6_OPT_SET_VAL = 0x964 // 2404 + SYS_INET6_OPT_NEXT = 0x965 // 2405 + SYS_INET6_OPT_FIND = 0x966 // 2406 + SYS_INET6_OPT_GET_VAL = 0x967 // 2407 + SYS___POW_I = 0x987 // 2439 + SYS___POW_I_B = 0x988 // 2440 + SYS___POW_I_H = 0x989 // 2441 + SYS___POW_II = 0x98A // 2442 + SYS___POW_II_B = 0x98B // 2443 + SYS___POW_II_H = 0x98C // 2444 + SYS_CABS = 0x98E // 2446 + SYS___CABS_B = 0x98F // 2447 + SYS___CABS_H = 0x990 // 2448 + SYS_CABSF = 0x991 // 2449 + SYS___CABSF_B = 0x992 // 2450 + SYS___CABSF_H = 0x993 // 2451 + SYS_CABSL = 0x994 // 2452 + SYS___CABSL_B = 0x995 // 2453 + SYS___CABSL_H = 0x996 // 2454 + SYS_CACOS = 0x997 // 2455 + SYS___CACOS_B = 0x998 // 2456 + SYS___CACOS_H = 0x999 // 2457 + SYS_CACOSF = 0x99A // 2458 + SYS___CACOSF_B = 0x99B // 2459 + SYS___CACOSF_H = 0x99C // 2460 + SYS_CACOSL = 0x99D // 2461 + SYS___CACOSL_B = 0x99E // 2462 + SYS___CACOSL_H = 0x99F // 2463 + SYS_CACOSH = 0x9A0 // 2464 + SYS___CACOSH_B = 0x9A1 // 2465 + SYS___CACOSH_H = 0x9A2 // 2466 + SYS_CACOSHF = 0x9A3 // 2467 + SYS___CACOSHF_B = 0x9A4 // 2468 + SYS___CACOSHF_H = 0x9A5 // 2469 + SYS_CACOSHL = 0x9A6 // 2470 + SYS___CACOSHL_B = 0x9A7 // 2471 + SYS___CACOSHL_H = 0x9A8 // 2472 + SYS_CARG = 0x9A9 // 2473 + SYS___CARG_B = 0x9AA // 2474 + SYS___CARG_H = 0x9AB // 2475 + SYS_CARGF = 0x9AC // 2476 + SYS___CARGF_B = 0x9AD // 
2477 + SYS___CARGF_H = 0x9AE // 2478 + SYS_CARGL = 0x9AF // 2479 + SYS___CARGL_B = 0x9B0 // 2480 + SYS___CARGL_H = 0x9B1 // 2481 + SYS_CASIN = 0x9B2 // 2482 + SYS___CASIN_B = 0x9B3 // 2483 + SYS___CASIN_H = 0x9B4 // 2484 + SYS_CASINF = 0x9B5 // 2485 + SYS___CASINF_B = 0x9B6 // 2486 + SYS___CASINF_H = 0x9B7 // 2487 + SYS_CASINL = 0x9B8 // 2488 + SYS___CASINL_B = 0x9B9 // 2489 + SYS___CASINL_H = 0x9BA // 2490 + SYS_CASINH = 0x9BB // 2491 + SYS___CASINH_B = 0x9BC // 2492 + SYS___CASINH_H = 0x9BD // 2493 + SYS_CASINHF = 0x9BE // 2494 + SYS___CASINHF_B = 0x9BF // 2495 + SYS___CASINHF_H = 0x9C0 // 2496 + SYS_CASINHL = 0x9C1 // 2497 + SYS___CASINHL_B = 0x9C2 // 2498 + SYS___CASINHL_H = 0x9C3 // 2499 + SYS_CATAN = 0x9C4 // 2500 + SYS___CATAN_B = 0x9C5 // 2501 + SYS___CATAN_H = 0x9C6 // 2502 + SYS_CATANF = 0x9C7 // 2503 + SYS___CATANF_B = 0x9C8 // 2504 + SYS___CATANF_H = 0x9C9 // 2505 + SYS_CATANL = 0x9CA // 2506 + SYS___CATANL_B = 0x9CB // 2507 + SYS___CATANL_H = 0x9CC // 2508 + SYS_CATANH = 0x9CD // 2509 + SYS___CATANH_B = 0x9CE // 2510 + SYS___CATANH_H = 0x9CF // 2511 + SYS_CATANHF = 0x9D0 // 2512 + SYS___CATANHF_B = 0x9D1 // 2513 + SYS___CATANHF_H = 0x9D2 // 2514 + SYS_CATANHL = 0x9D3 // 2515 + SYS___CATANHL_B = 0x9D4 // 2516 + SYS___CATANHL_H = 0x9D5 // 2517 + SYS_CCOS = 0x9D6 // 2518 + SYS___CCOS_B = 0x9D7 // 2519 + SYS___CCOS_H = 0x9D8 // 2520 + SYS_CCOSF = 0x9D9 // 2521 + SYS___CCOSF_B = 0x9DA // 2522 + SYS___CCOSF_H = 0x9DB // 2523 + SYS_CCOSL = 0x9DC // 2524 + SYS___CCOSL_B = 0x9DD // 2525 + SYS___CCOSL_H = 0x9DE // 2526 + SYS_CCOSH = 0x9DF // 2527 + SYS___CCOSH_B = 0x9E0 // 2528 + SYS___CCOSH_H = 0x9E1 // 2529 + SYS_CCOSHF = 0x9E2 // 2530 + SYS___CCOSHF_B = 0x9E3 // 2531 + SYS___CCOSHF_H = 0x9E4 // 2532 + SYS_CCOSHL = 0x9E5 // 2533 + SYS___CCOSHL_B = 0x9E6 // 2534 + SYS___CCOSHL_H = 0x9E7 // 2535 + SYS_CEXP = 0x9E8 // 2536 + SYS___CEXP_B = 0x9E9 // 2537 + SYS___CEXP_H = 0x9EA // 2538 + SYS_CEXPF = 0x9EB // 2539 + SYS___CEXPF_B = 0x9EC // 2540 + SYS___CEXPF_H = 0x9ED // 2541 + SYS_CEXPL = 0x9EE // 2542 + SYS___CEXPL_B = 0x9EF // 2543 + SYS___CEXPL_H = 0x9F0 // 2544 + SYS_CIMAG = 0x9F1 // 2545 + SYS___CIMAG_B = 0x9F2 // 2546 + SYS___CIMAG_H = 0x9F3 // 2547 + SYS_CIMAGF = 0x9F4 // 2548 + SYS___CIMAGF_B = 0x9F5 // 2549 + SYS___CIMAGF_H = 0x9F6 // 2550 + SYS_CIMAGL = 0x9F7 // 2551 + SYS___CIMAGL_B = 0x9F8 // 2552 + SYS___CIMAGL_H = 0x9F9 // 2553 + SYS___CLOG = 0x9FA // 2554 + SYS___CLOG_B = 0x9FB // 2555 + SYS___CLOG_H = 0x9FC // 2556 + SYS_CLOGF = 0x9FD // 2557 + SYS___CLOGF_B = 0x9FE // 2558 + SYS___CLOGF_H = 0x9FF // 2559 + SYS_CLOGL = 0xA00 // 2560 + SYS___CLOGL_B = 0xA01 // 2561 + SYS___CLOGL_H = 0xA02 // 2562 + SYS_CONJ = 0xA03 // 2563 + SYS___CONJ_B = 0xA04 // 2564 + SYS___CONJ_H = 0xA05 // 2565 + SYS_CONJF = 0xA06 // 2566 + SYS___CONJF_B = 0xA07 // 2567 + SYS___CONJF_H = 0xA08 // 2568 + SYS_CONJL = 0xA09 // 2569 + SYS___CONJL_B = 0xA0A // 2570 + SYS___CONJL_H = 0xA0B // 2571 + SYS_CPOW = 0xA0C // 2572 + SYS___CPOW_B = 0xA0D // 2573 + SYS___CPOW_H = 0xA0E // 2574 + SYS_CPOWF = 0xA0F // 2575 + SYS___CPOWF_B = 0xA10 // 2576 + SYS___CPOWF_H = 0xA11 // 2577 + SYS_CPOWL = 0xA12 // 2578 + SYS___CPOWL_B = 0xA13 // 2579 + SYS___CPOWL_H = 0xA14 // 2580 + SYS_CPROJ = 0xA15 // 2581 + SYS___CPROJ_B = 0xA16 // 2582 + SYS___CPROJ_H = 0xA17 // 2583 + SYS_CPROJF = 0xA18 // 2584 + SYS___CPROJF_B = 0xA19 // 2585 + SYS___CPROJF_H = 0xA1A // 2586 + SYS_CPROJL = 0xA1B // 2587 + SYS___CPROJL_B = 0xA1C // 2588 + SYS___CPROJL_H = 0xA1D // 2589 + SYS_CREAL = 0xA1E // 2590 + SYS___CREAL_B = 0xA1F // 2591 + 
SYS___CREAL_H = 0xA20 // 2592 + SYS_CREALF = 0xA21 // 2593 + SYS___CREALF_B = 0xA22 // 2594 + SYS___CREALF_H = 0xA23 // 2595 + SYS_CREALL = 0xA24 // 2596 + SYS___CREALL_B = 0xA25 // 2597 + SYS___CREALL_H = 0xA26 // 2598 + SYS_CSIN = 0xA27 // 2599 + SYS___CSIN_B = 0xA28 // 2600 + SYS___CSIN_H = 0xA29 // 2601 + SYS_CSINF = 0xA2A // 2602 + SYS___CSINF_B = 0xA2B // 2603 + SYS___CSINF_H = 0xA2C // 2604 + SYS_CSINL = 0xA2D // 2605 + SYS___CSINL_B = 0xA2E // 2606 + SYS___CSINL_H = 0xA2F // 2607 + SYS_CSINH = 0xA30 // 2608 + SYS___CSINH_B = 0xA31 // 2609 + SYS___CSINH_H = 0xA32 // 2610 + SYS_CSINHF = 0xA33 // 2611 + SYS___CSINHF_B = 0xA34 // 2612 + SYS___CSINHF_H = 0xA35 // 2613 + SYS_CSINHL = 0xA36 // 2614 + SYS___CSINHL_B = 0xA37 // 2615 + SYS___CSINHL_H = 0xA38 // 2616 + SYS_CSQRT = 0xA39 // 2617 + SYS___CSQRT_B = 0xA3A // 2618 + SYS___CSQRT_H = 0xA3B // 2619 + SYS_CSQRTF = 0xA3C // 2620 + SYS___CSQRTF_B = 0xA3D // 2621 + SYS___CSQRTF_H = 0xA3E // 2622 + SYS_CSQRTL = 0xA3F // 2623 + SYS___CSQRTL_B = 0xA40 // 2624 + SYS___CSQRTL_H = 0xA41 // 2625 + SYS_CTAN = 0xA42 // 2626 + SYS___CTAN_B = 0xA43 // 2627 + SYS___CTAN_H = 0xA44 // 2628 + SYS_CTANF = 0xA45 // 2629 + SYS___CTANF_B = 0xA46 // 2630 + SYS___CTANF_H = 0xA47 // 2631 + SYS_CTANL = 0xA48 // 2632 + SYS___CTANL_B = 0xA49 // 2633 + SYS___CTANL_H = 0xA4A // 2634 + SYS_CTANH = 0xA4B // 2635 + SYS___CTANH_B = 0xA4C // 2636 + SYS___CTANH_H = 0xA4D // 2637 + SYS_CTANHF = 0xA4E // 2638 + SYS___CTANHF_B = 0xA4F // 2639 + SYS___CTANHF_H = 0xA50 // 2640 + SYS_CTANHL = 0xA51 // 2641 + SYS___CTANHL_B = 0xA52 // 2642 + SYS___CTANHL_H = 0xA53 // 2643 + SYS___ACOSHF_H = 0xA54 // 2644 + SYS___ACOSHL_H = 0xA55 // 2645 + SYS___ASINHF_H = 0xA56 // 2646 + SYS___ASINHL_H = 0xA57 // 2647 + SYS___CBRTF_H = 0xA58 // 2648 + SYS___CBRTL_H = 0xA59 // 2649 + SYS___COPYSIGN_B = 0xA5A // 2650 + SYS___EXPM1F_H = 0xA5B // 2651 + SYS___EXPM1L_H = 0xA5C // 2652 + SYS___EXP2_H = 0xA5D // 2653 + SYS___EXP2F_H = 0xA5E // 2654 + SYS___EXP2L_H = 0xA5F // 2655 + SYS___LOG1PF_H = 0xA60 // 2656 + SYS___LOG1PL_H = 0xA61 // 2657 + SYS___LGAMMAL_H = 0xA62 // 2658 + SYS_FMA = 0xA63 // 2659 + SYS___FMA_B = 0xA64 // 2660 + SYS___FMA_H = 0xA65 // 2661 + SYS_FMAF = 0xA66 // 2662 + SYS___FMAF_B = 0xA67 // 2663 + SYS___FMAF_H = 0xA68 // 2664 + SYS_FMAL = 0xA69 // 2665 + SYS___FMAL_B = 0xA6A // 2666 + SYS___FMAL_H = 0xA6B // 2667 + SYS_FMAX = 0xA6C // 2668 + SYS___FMAX_B = 0xA6D // 2669 + SYS___FMAX_H = 0xA6E // 2670 + SYS_FMAXF = 0xA6F // 2671 + SYS___FMAXF_B = 0xA70 // 2672 + SYS___FMAXF_H = 0xA71 // 2673 + SYS_FMAXL = 0xA72 // 2674 + SYS___FMAXL_B = 0xA73 // 2675 + SYS___FMAXL_H = 0xA74 // 2676 + SYS_FMIN = 0xA75 // 2677 + SYS___FMIN_B = 0xA76 // 2678 + SYS___FMIN_H = 0xA77 // 2679 + SYS_FMINF = 0xA78 // 2680 + SYS___FMINF_B = 0xA79 // 2681 + SYS___FMINF_H = 0xA7A // 2682 + SYS_FMINL = 0xA7B // 2683 + SYS___FMINL_B = 0xA7C // 2684 + SYS___FMINL_H = 0xA7D // 2685 + SYS_ILOGBF = 0xA7E // 2686 + SYS___ILOGBF_B = 0xA7F // 2687 + SYS___ILOGBF_H = 0xA80 // 2688 + SYS_ILOGBL = 0xA81 // 2689 + SYS___ILOGBL_B = 0xA82 // 2690 + SYS___ILOGBL_H = 0xA83 // 2691 + SYS_LLRINT = 0xA84 // 2692 + SYS___LLRINT_B = 0xA85 // 2693 + SYS___LLRINT_H = 0xA86 // 2694 + SYS_LLRINTF = 0xA87 // 2695 + SYS___LLRINTF_B = 0xA88 // 2696 + SYS___LLRINTF_H = 0xA89 // 2697 + SYS_LLRINTL = 0xA8A // 2698 + SYS___LLRINTL_B = 0xA8B // 2699 + SYS___LLRINTL_H = 0xA8C // 2700 + SYS_LLROUND = 0xA8D // 2701 + SYS___LLROUND_B = 0xA8E // 2702 + SYS___LLROUND_H = 0xA8F // 2703 + SYS_LLROUNDF = 0xA90 // 2704 + SYS___LLROUNDF_B = 0xA91 // 
2705 + SYS___LLROUNDF_H = 0xA92 // 2706 + SYS_LLROUNDL = 0xA93 // 2707 + SYS___LLROUNDL_B = 0xA94 // 2708 + SYS___LLROUNDL_H = 0xA95 // 2709 + SYS_LOGBF = 0xA96 // 2710 + SYS___LOGBF_B = 0xA97 // 2711 + SYS___LOGBF_H = 0xA98 // 2712 + SYS_LOGBL = 0xA99 // 2713 + SYS___LOGBL_B = 0xA9A // 2714 + SYS___LOGBL_H = 0xA9B // 2715 + SYS_LRINT = 0xA9C // 2716 + SYS___LRINT_B = 0xA9D // 2717 + SYS___LRINT_H = 0xA9E // 2718 + SYS_LRINTF = 0xA9F // 2719 + SYS___LRINTF_B = 0xAA0 // 2720 + SYS___LRINTF_H = 0xAA1 // 2721 + SYS_LRINTL = 0xAA2 // 2722 + SYS___LRINTL_B = 0xAA3 // 2723 + SYS___LRINTL_H = 0xAA4 // 2724 + SYS_LROUNDL = 0xAA5 // 2725 + SYS___LROUNDL_B = 0xAA6 // 2726 + SYS___LROUNDL_H = 0xAA7 // 2727 + SYS_NAN = 0xAA8 // 2728 + SYS___NAN_B = 0xAA9 // 2729 + SYS_NANF = 0xAAA // 2730 + SYS___NANF_B = 0xAAB // 2731 + SYS_NANL = 0xAAC // 2732 + SYS___NANL_B = 0xAAD // 2733 + SYS_NEARBYINT = 0xAAE // 2734 + SYS___NEARBYINT_B = 0xAAF // 2735 + SYS___NEARBYINT_H = 0xAB0 // 2736 + SYS_NEARBYINTF = 0xAB1 // 2737 + SYS___NEARBYINTF_B = 0xAB2 // 2738 + SYS___NEARBYINTF_H = 0xAB3 // 2739 + SYS_NEARBYINTL = 0xAB4 // 2740 + SYS___NEARBYINTL_B = 0xAB5 // 2741 + SYS___NEARBYINTL_H = 0xAB6 // 2742 + SYS_NEXTAFTERF = 0xAB7 // 2743 + SYS___NEXTAFTERF_B = 0xAB8 // 2744 + SYS___NEXTAFTERF_H = 0xAB9 // 2745 + SYS_NEXTAFTERL = 0xABA // 2746 + SYS___NEXTAFTERL_B = 0xABB // 2747 + SYS___NEXTAFTERL_H = 0xABC // 2748 + SYS_NEXTTOWARD = 0xABD // 2749 + SYS___NEXTTOWARD_B = 0xABE // 2750 + SYS___NEXTTOWARD_H = 0xABF // 2751 + SYS_NEXTTOWARDF = 0xAC0 // 2752 + SYS___NEXTTOWARDF_B = 0xAC1 // 2753 + SYS___NEXTTOWARDF_H = 0xAC2 // 2754 + SYS_NEXTTOWARDL = 0xAC3 // 2755 + SYS___NEXTTOWARDL_B = 0xAC4 // 2756 + SYS___NEXTTOWARDL_H = 0xAC5 // 2757 + SYS___REMAINDERF_H = 0xAC6 // 2758 + SYS___REMAINDERL_H = 0xAC7 // 2759 + SYS___REMQUO_H = 0xAC8 // 2760 + SYS___REMQUOF_H = 0xAC9 // 2761 + SYS___REMQUOL_H = 0xACA // 2762 + SYS_RINTF = 0xACB // 2763 + SYS___RINTF_B = 0xACC // 2764 + SYS_RINTL = 0xACD // 2765 + SYS___RINTL_B = 0xACE // 2766 + SYS_ROUND = 0xACF // 2767 + SYS___ROUND_B = 0xAD0 // 2768 + SYS___ROUND_H = 0xAD1 // 2769 + SYS_ROUNDF = 0xAD2 // 2770 + SYS___ROUNDF_B = 0xAD3 // 2771 + SYS___ROUNDF_H = 0xAD4 // 2772 + SYS_ROUNDL = 0xAD5 // 2773 + SYS___ROUNDL_B = 0xAD6 // 2774 + SYS___ROUNDL_H = 0xAD7 // 2775 + SYS_SCALBLN = 0xAD8 // 2776 + SYS___SCALBLN_B = 0xAD9 // 2777 + SYS___SCALBLN_H = 0xADA // 2778 + SYS_SCALBLNF = 0xADB // 2779 + SYS___SCALBLNF_B = 0xADC // 2780 + SYS___SCALBLNF_H = 0xADD // 2781 + SYS_SCALBLNL = 0xADE // 2782 + SYS___SCALBLNL_B = 0xADF // 2783 + SYS___SCALBLNL_H = 0xAE0 // 2784 + SYS___SCALBN_B = 0xAE1 // 2785 + SYS___SCALBN_H = 0xAE2 // 2786 + SYS_SCALBNF = 0xAE3 // 2787 + SYS___SCALBNF_B = 0xAE4 // 2788 + SYS___SCALBNF_H = 0xAE5 // 2789 + SYS_SCALBNL = 0xAE6 // 2790 + SYS___SCALBNL_B = 0xAE7 // 2791 + SYS___SCALBNL_H = 0xAE8 // 2792 + SYS___TGAMMAL_H = 0xAE9 // 2793 + SYS_FECLEAREXCEPT = 0xAEA // 2794 + SYS_FEGETENV = 0xAEB // 2795 + SYS_FEGETEXCEPTFLAG = 0xAEC // 2796 + SYS_FEGETROUND = 0xAED // 2797 + SYS_FEHOLDEXCEPT = 0xAEE // 2798 + SYS_FERAISEEXCEPT = 0xAEF // 2799 + SYS_FESETENV = 0xAF0 // 2800 + SYS_FESETEXCEPTFLAG = 0xAF1 // 2801 + SYS_FESETROUND = 0xAF2 // 2802 + SYS_FETESTEXCEPT = 0xAF3 // 2803 + SYS_FEUPDATEENV = 0xAF4 // 2804 + SYS___COPYSIGN_H = 0xAF5 // 2805 + SYS___HYPOTF_H = 0xAF6 // 2806 + SYS___HYPOTL_H = 0xAF7 // 2807 + SYS___CLASS = 0xAFA // 2810 + SYS___CLASS_B = 0xAFB // 2811 + SYS___CLASS_H = 0xAFC // 2812 + SYS___ISBLANK_A = 0xB2E // 2862 + SYS___ISWBLANK_A = 0xB2F // 2863 + 
SYS___LROUND_FIXUP = 0xB30 // 2864 + SYS___LROUNDF_FIXUP = 0xB31 // 2865 + SYS_SCHED_YIELD = 0xB32 // 2866 + SYS_STRERROR_R = 0xB33 // 2867 + SYS_UNSETENV = 0xB34 // 2868 + SYS___LGAMMA_H_C99 = 0xB38 // 2872 + SYS___LGAMMA_B_C99 = 0xB39 // 2873 + SYS___LGAMMA_R_C99 = 0xB3A // 2874 + SYS___FTELL2 = 0xB3B // 2875 + SYS___FSEEK2 = 0xB3C // 2876 + SYS___STATIC_REINIT = 0xB3D // 2877 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E // 2878 + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F // 2879 + SYS___TGAMMA_H_C99 = 0xB78 // 2936 + SYS___TGAMMAF_H_C99 = 0xB79 // 2937 + SYS___LE_TRACEBACK = 0xB7A // 2938 + SYS___MUST_STAY_CLEAN = 0xB7C // 2940 + SYS___O_ENV = 0xB7D // 2941 + SYS_ACOSD32 = 0xB7E // 2942 + SYS_ACOSD64 = 0xB7F // 2943 + SYS_ACOSD128 = 0xB80 // 2944 + SYS_ACOSHD32 = 0xB81 // 2945 + SYS_ACOSHD64 = 0xB82 // 2946 + SYS_ACOSHD128 = 0xB83 // 2947 + SYS_ASIND32 = 0xB84 // 2948 + SYS_ASIND64 = 0xB85 // 2949 + SYS_ASIND128 = 0xB86 // 2950 + SYS_ASINHD32 = 0xB87 // 2951 + SYS_ASINHD64 = 0xB88 // 2952 + SYS_ASINHD128 = 0xB89 // 2953 + SYS_ATAND32 = 0xB8A // 2954 + SYS_ATAND64 = 0xB8B // 2955 + SYS_ATAND128 = 0xB8C // 2956 + SYS_ATAN2D32 = 0xB8D // 2957 + SYS_ATAN2D64 = 0xB8E // 2958 + SYS_ATAN2D128 = 0xB8F // 2959 + SYS_ATANHD32 = 0xB90 // 2960 + SYS_ATANHD64 = 0xB91 // 2961 + SYS_ATANHD128 = 0xB92 // 2962 + SYS_CBRTD32 = 0xB93 // 2963 + SYS_CBRTD64 = 0xB94 // 2964 + SYS_CBRTD128 = 0xB95 // 2965 + SYS_CEILD32 = 0xB96 // 2966 + SYS_CEILD64 = 0xB97 // 2967 + SYS_CEILD128 = 0xB98 // 2968 + SYS___CLASS2 = 0xB99 // 2969 + SYS___CLASS2_B = 0xB9A // 2970 + SYS___CLASS2_H = 0xB9B // 2971 + SYS_COPYSIGND32 = 0xB9C // 2972 + SYS_COPYSIGND64 = 0xB9D // 2973 + SYS_COPYSIGND128 = 0xB9E // 2974 + SYS_COSD32 = 0xB9F // 2975 + SYS_COSD64 = 0xBA0 // 2976 + SYS_COSD128 = 0xBA1 // 2977 + SYS_COSHD32 = 0xBA2 // 2978 + SYS_COSHD64 = 0xBA3 // 2979 + SYS_COSHD128 = 0xBA4 // 2980 + SYS_ERFD32 = 0xBA5 // 2981 + SYS_ERFD64 = 0xBA6 // 2982 + SYS_ERFD128 = 0xBA7 // 2983 + SYS_ERFCD32 = 0xBA8 // 2984 + SYS_ERFCD64 = 0xBA9 // 2985 + SYS_ERFCD128 = 0xBAA // 2986 + SYS_EXPD32 = 0xBAB // 2987 + SYS_EXPD64 = 0xBAC // 2988 + SYS_EXPD128 = 0xBAD // 2989 + SYS_EXP2D32 = 0xBAE // 2990 + SYS_EXP2D64 = 0xBAF // 2991 + SYS_EXP2D128 = 0xBB0 // 2992 + SYS_EXPM1D32 = 0xBB1 // 2993 + SYS_EXPM1D64 = 0xBB2 // 2994 + SYS_EXPM1D128 = 0xBB3 // 2995 + SYS_FABSD32 = 0xBB4 // 2996 + SYS_FABSD64 = 0xBB5 // 2997 + SYS_FABSD128 = 0xBB6 // 2998 + SYS_FDIMD32 = 0xBB7 // 2999 + SYS_FDIMD64 = 0xBB8 // 3000 + SYS_FDIMD128 = 0xBB9 // 3001 + SYS_FE_DEC_GETROUND = 0xBBA // 3002 + SYS_FE_DEC_SETROUND = 0xBBB // 3003 + SYS_FLOORD32 = 0xBBC // 3004 + SYS_FLOORD64 = 0xBBD // 3005 + SYS_FLOORD128 = 0xBBE // 3006 + SYS_FMAD32 = 0xBBF // 3007 + SYS_FMAD64 = 0xBC0 // 3008 + SYS_FMAD128 = 0xBC1 // 3009 + SYS_FMAXD32 = 0xBC2 // 3010 + SYS_FMAXD64 = 0xBC3 // 3011 + SYS_FMAXD128 = 0xBC4 // 3012 + SYS_FMIND32 = 0xBC5 // 3013 + SYS_FMIND64 = 0xBC6 // 3014 + SYS_FMIND128 = 0xBC7 // 3015 + SYS_FMODD32 = 0xBC8 // 3016 + SYS_FMODD64 = 0xBC9 // 3017 + SYS_FMODD128 = 0xBCA // 3018 + SYS___FP_CAST_D = 0xBCB // 3019 + SYS_FREXPD32 = 0xBCC // 3020 + SYS_FREXPD64 = 0xBCD // 3021 + SYS_FREXPD128 = 0xBCE // 3022 + SYS_HYPOTD32 = 0xBCF // 3023 + SYS_HYPOTD64 = 0xBD0 // 3024 + SYS_HYPOTD128 = 0xBD1 // 3025 + SYS_ILOGBD32 = 0xBD2 // 3026 + SYS_ILOGBD64 = 0xBD3 // 3027 + SYS_ILOGBD128 = 0xBD4 // 3028 + SYS_LDEXPD32 = 0xBD5 // 3029 + SYS_LDEXPD64 = 0xBD6 // 3030 + SYS_LDEXPD128 = 0xBD7 // 3031 + SYS_LGAMMAD32 = 0xBD8 // 3032 + SYS_LGAMMAD64 = 0xBD9 // 3033 + SYS_LGAMMAD128 = 0xBDA // 3034 + SYS_LLRINTD32 = 
0xBDB // 3035 + SYS_LLRINTD64 = 0xBDC // 3036 + SYS_LLRINTD128 = 0xBDD // 3037 + SYS_LLROUNDD32 = 0xBDE // 3038 + SYS_LLROUNDD64 = 0xBDF // 3039 + SYS_LLROUNDD128 = 0xBE0 // 3040 + SYS_LOGD32 = 0xBE1 // 3041 + SYS_LOGD64 = 0xBE2 // 3042 + SYS_LOGD128 = 0xBE3 // 3043 + SYS_LOG10D32 = 0xBE4 // 3044 + SYS_LOG10D64 = 0xBE5 // 3045 + SYS_LOG10D128 = 0xBE6 // 3046 + SYS_LOG1PD32 = 0xBE7 // 3047 + SYS_LOG1PD64 = 0xBE8 // 3048 + SYS_LOG1PD128 = 0xBE9 // 3049 + SYS_LOG2D32 = 0xBEA // 3050 + SYS_LOG2D64 = 0xBEB // 3051 + SYS_LOG2D128 = 0xBEC // 3052 + SYS_LOGBD32 = 0xBED // 3053 + SYS_LOGBD64 = 0xBEE // 3054 + SYS_LOGBD128 = 0xBEF // 3055 + SYS_LRINTD32 = 0xBF0 // 3056 + SYS_LRINTD64 = 0xBF1 // 3057 + SYS_LRINTD128 = 0xBF2 // 3058 + SYS_LROUNDD32 = 0xBF3 // 3059 + SYS_LROUNDD64 = 0xBF4 // 3060 + SYS_LROUNDD128 = 0xBF5 // 3061 + SYS_MODFD32 = 0xBF6 // 3062 + SYS_MODFD64 = 0xBF7 // 3063 + SYS_MODFD128 = 0xBF8 // 3064 + SYS_NAND32 = 0xBF9 // 3065 + SYS_NAND64 = 0xBFA // 3066 + SYS_NAND128 = 0xBFB // 3067 + SYS_NEARBYINTD32 = 0xBFC // 3068 + SYS_NEARBYINTD64 = 0xBFD // 3069 + SYS_NEARBYINTD128 = 0xBFE // 3070 + SYS_NEXTAFTERD32 = 0xBFF // 3071 + SYS_NEXTAFTERD64 = 0xC00 // 3072 + SYS_NEXTAFTERD128 = 0xC01 // 3073 + SYS_NEXTTOWARDD32 = 0xC02 // 3074 + SYS_NEXTTOWARDD64 = 0xC03 // 3075 + SYS_NEXTTOWARDD128 = 0xC04 // 3076 + SYS_POWD32 = 0xC05 // 3077 + SYS_POWD64 = 0xC06 // 3078 + SYS_POWD128 = 0xC07 // 3079 + SYS_QUANTIZED32 = 0xC08 // 3080 + SYS_QUANTIZED64 = 0xC09 // 3081 + SYS_QUANTIZED128 = 0xC0A // 3082 + SYS_REMAINDERD32 = 0xC0B // 3083 + SYS_REMAINDERD64 = 0xC0C // 3084 + SYS_REMAINDERD128 = 0xC0D // 3085 + SYS___REMQUOD32 = 0xC0E // 3086 + SYS___REMQUOD64 = 0xC0F // 3087 + SYS___REMQUOD128 = 0xC10 // 3088 + SYS_RINTD32 = 0xC11 // 3089 + SYS_RINTD64 = 0xC12 // 3090 + SYS_RINTD128 = 0xC13 // 3091 + SYS_ROUNDD32 = 0xC14 // 3092 + SYS_ROUNDD64 = 0xC15 // 3093 + SYS_ROUNDD128 = 0xC16 // 3094 + SYS_SAMEQUANTUMD32 = 0xC17 // 3095 + SYS_SAMEQUANTUMD64 = 0xC18 // 3096 + SYS_SAMEQUANTUMD128 = 0xC19 // 3097 + SYS_SCALBLND32 = 0xC1A // 3098 + SYS_SCALBLND64 = 0xC1B // 3099 + SYS_SCALBLND128 = 0xC1C // 3100 + SYS_SCALBND32 = 0xC1D // 3101 + SYS_SCALBND64 = 0xC1E // 3102 + SYS_SCALBND128 = 0xC1F // 3103 + SYS_SIND32 = 0xC20 // 3104 + SYS_SIND64 = 0xC21 // 3105 + SYS_SIND128 = 0xC22 // 3106 + SYS_SINHD32 = 0xC23 // 3107 + SYS_SINHD64 = 0xC24 // 3108 + SYS_SINHD128 = 0xC25 // 3109 + SYS_SQRTD32 = 0xC26 // 3110 + SYS_SQRTD64 = 0xC27 // 3111 + SYS_SQRTD128 = 0xC28 // 3112 + SYS_STRTOD32 = 0xC29 // 3113 + SYS_STRTOD64 = 0xC2A // 3114 + SYS_STRTOD128 = 0xC2B // 3115 + SYS_TAND32 = 0xC2C // 3116 + SYS_TAND64 = 0xC2D // 3117 + SYS_TAND128 = 0xC2E // 3118 + SYS_TANHD32 = 0xC2F // 3119 + SYS_TANHD64 = 0xC30 // 3120 + SYS_TANHD128 = 0xC31 // 3121 + SYS_TGAMMAD32 = 0xC32 // 3122 + SYS_TGAMMAD64 = 0xC33 // 3123 + SYS_TGAMMAD128 = 0xC34 // 3124 + SYS_TRUNCD32 = 0xC3E // 3134 + SYS_TRUNCD64 = 0xC3F // 3135 + SYS_TRUNCD128 = 0xC40 // 3136 + SYS_WCSTOD32 = 0xC41 // 3137 + SYS_WCSTOD64 = 0xC42 // 3138 + SYS_WCSTOD128 = 0xC43 // 3139 + SYS___CODEPAGE_INFO = 0xC64 // 3172 + SYS_POSIX_OPENPT = 0xC66 // 3174 + SYS_PSELECT = 0xC67 // 3175 + SYS_SOCKATMARK = 0xC68 // 3176 + SYS_AIO_FSYNC = 0xC69 // 3177 + SYS_LIO_LISTIO = 0xC6A // 3178 + SYS___ATANPID32 = 0xC6B // 3179 + SYS___ATANPID64 = 0xC6C // 3180 + SYS___ATANPID128 = 0xC6D // 3181 + SYS___COSPID32 = 0xC6E // 3182 + SYS___COSPID64 = 0xC6F // 3183 + SYS___COSPID128 = 0xC70 // 3184 + SYS___SINPID32 = 0xC71 // 3185 + SYS___SINPID64 = 0xC72 // 3186 + SYS___SINPID128 = 0xC73 // 3187 
+ SYS_SETIPV4SOURCEFILTER = 0xC76 // 3190 + SYS_GETIPV4SOURCEFILTER = 0xC77 // 3191 + SYS_SETSOURCEFILTER = 0xC78 // 3192 + SYS_GETSOURCEFILTER = 0xC79 // 3193 + SYS_FWRITE_UNLOCKED = 0xC7A // 3194 + SYS_FREAD_UNLOCKED = 0xC7B // 3195 + SYS_FGETS_UNLOCKED = 0xC7C // 3196 + SYS_GETS_UNLOCKED = 0xC7D // 3197 + SYS_FPUTS_UNLOCKED = 0xC7E // 3198 + SYS_PUTS_UNLOCKED = 0xC7F // 3199 + SYS_FGETC_UNLOCKED = 0xC80 // 3200 + SYS_FPUTC_UNLOCKED = 0xC81 // 3201 + SYS_DLADDR = 0xC82 // 3202 + SYS_SHM_OPEN = 0xC8C // 3212 + SYS_SHM_UNLINK = 0xC8D // 3213 + SYS___CLASS2F = 0xC91 // 3217 + SYS___CLASS2L = 0xC92 // 3218 + SYS___CLASS2F_B = 0xC93 // 3219 + SYS___CLASS2F_H = 0xC94 // 3220 + SYS___CLASS2L_B = 0xC95 // 3221 + SYS___CLASS2L_H = 0xC96 // 3222 + SYS___CLASS2D32 = 0xC97 // 3223 + SYS___CLASS2D64 = 0xC98 // 3224 + SYS___CLASS2D128 = 0xC99 // 3225 + SYS___TOCSNAME2 = 0xC9A // 3226 + SYS___D1TOP = 0xC9B // 3227 + SYS___D2TOP = 0xC9C // 3228 + SYS___D4TOP = 0xC9D // 3229 + SYS___PTOD1 = 0xC9E // 3230 + SYS___PTOD2 = 0xC9F // 3231 + SYS___PTOD4 = 0xCA0 // 3232 + SYS_CLEARERR_UNLOCKED = 0xCA1 // 3233 + SYS_FDELREC_UNLOCKED = 0xCA2 // 3234 + SYS_FEOF_UNLOCKED = 0xCA3 // 3235 + SYS_FERROR_UNLOCKED = 0xCA4 // 3236 + SYS_FFLUSH_UNLOCKED = 0xCA5 // 3237 + SYS_FGETPOS_UNLOCKED = 0xCA6 // 3238 + SYS_FGETWC_UNLOCKED = 0xCA7 // 3239 + SYS_FGETWS_UNLOCKED = 0xCA8 // 3240 + SYS_FILENO_UNLOCKED = 0xCA9 // 3241 + SYS_FLDATA_UNLOCKED = 0xCAA // 3242 + SYS_FLOCATE_UNLOCKED = 0xCAB // 3243 + SYS_FPRINTF_UNLOCKED = 0xCAC // 3244 + SYS_FPUTWC_UNLOCKED = 0xCAD // 3245 + SYS_FPUTWS_UNLOCKED = 0xCAE // 3246 + SYS_FSCANF_UNLOCKED = 0xCAF // 3247 + SYS_FSEEK_UNLOCKED = 0xCB0 // 3248 + SYS_FSEEKO_UNLOCKED = 0xCB1 // 3249 + SYS_FSETPOS_UNLOCKED = 0xCB3 // 3251 + SYS_FTELL_UNLOCKED = 0xCB4 // 3252 + SYS_FTELLO_UNLOCKED = 0xCB5 // 3253 + SYS_FUPDATE_UNLOCKED = 0xCB7 // 3255 + SYS_FWIDE_UNLOCKED = 0xCB8 // 3256 + SYS_FWPRINTF_UNLOCKED = 0xCB9 // 3257 + SYS_FWSCANF_UNLOCKED = 0xCBA // 3258 + SYS_GETWC_UNLOCKED = 0xCBB // 3259 + SYS_GETWCHAR_UNLOCKED = 0xCBC // 3260 + SYS_PERROR_UNLOCKED = 0xCBD // 3261 + SYS_PRINTF_UNLOCKED = 0xCBE // 3262 + SYS_PUTWC_UNLOCKED = 0xCBF // 3263 + SYS_PUTWCHAR_UNLOCKED = 0xCC0 // 3264 + SYS_REWIND_UNLOCKED = 0xCC1 // 3265 + SYS_SCANF_UNLOCKED = 0xCC2 // 3266 + SYS_UNGETC_UNLOCKED = 0xCC3 // 3267 + SYS_UNGETWC_UNLOCKED = 0xCC4 // 3268 + SYS_VFPRINTF_UNLOCKED = 0xCC5 // 3269 + SYS_VFSCANF_UNLOCKED = 0xCC7 // 3271 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 // 3273 + SYS_VFWSCANF_UNLOCKED = 0xCCB // 3275 + SYS_VPRINTF_UNLOCKED = 0xCCD // 3277 + SYS_VSCANF_UNLOCKED = 0xCCF // 3279 + SYS_VWPRINTF_UNLOCKED = 0xCD1 // 3281 + SYS_VWSCANF_UNLOCKED = 0xCD3 // 3283 + SYS_WPRINTF_UNLOCKED = 0xCD5 // 3285 + SYS_WSCANF_UNLOCKED = 0xCD6 // 3286 + SYS_ASCTIME64 = 0xCD7 // 3287 + SYS_ASCTIME64_R = 0xCD8 // 3288 + SYS_CTIME64 = 0xCD9 // 3289 + SYS_CTIME64_R = 0xCDA // 3290 + SYS_DIFFTIME64 = 0xCDB // 3291 + SYS_GMTIME64 = 0xCDC // 3292 + SYS_GMTIME64_R = 0xCDD // 3293 + SYS_LOCALTIME64 = 0xCDE // 3294 + SYS_LOCALTIME64_R = 0xCDF // 3295 + SYS_MKTIME64 = 0xCE0 // 3296 + SYS_TIME64 = 0xCE1 // 3297 + SYS___LOGIN_APPLID = 0xCE2 // 3298 + SYS___PASSWD_APPLID = 0xCE3 // 3299 + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 // 3300 + SYS___GETTHENT = 0xCE5 // 3301 + SYS_FREEIFADDRS = 0xCE6 // 3302 + SYS_GETIFADDRS = 0xCE7 // 3303 + SYS_POSIX_FALLOCATE = 0xCE8 // 3304 + SYS_POSIX_MEMALIGN = 0xCE9 // 3305 + SYS_SIZEOF_ALLOC = 0xCEA // 3306 + SYS_RESIZE_ALLOC = 0xCEB // 3307 + SYS_FREAD_NOUPDATE = 0xCEC // 3308 + SYS_FREAD_NOUPDATE_UNLOCKED = 
0xCED // 3309 + SYS_FGETPOS64 = 0xCEE // 3310 + SYS_FSEEK64 = 0xCEF // 3311 + SYS_FSEEKO64 = 0xCF0 // 3312 + SYS_FSETPOS64 = 0xCF1 // 3313 + SYS_FTELL64 = 0xCF2 // 3314 + SYS_FTELLO64 = 0xCF3 // 3315 + SYS_FGETPOS64_UNLOCKED = 0xCF4 // 3316 + SYS_FSEEK64_UNLOCKED = 0xCF5 // 3317 + SYS_FSEEKO64_UNLOCKED = 0xCF6 // 3318 + SYS_FSETPOS64_UNLOCKED = 0xCF7 // 3319 + SYS_FTELL64_UNLOCKED = 0xCF8 // 3320 + SYS_FTELLO64_UNLOCKED = 0xCF9 // 3321 + SYS_FOPEN_UNLOCKED = 0xCFA // 3322 + SYS_FREOPEN_UNLOCKED = 0xCFB // 3323 + SYS_FDOPEN_UNLOCKED = 0xCFC // 3324 + SYS_TMPFILE_UNLOCKED = 0xCFD // 3325 + SYS___MOSERVICES = 0xD3D // 3389 + SYS___GETTOD = 0xD3E // 3390 + SYS_C16RTOMB = 0xD40 // 3392 + SYS_C32RTOMB = 0xD41 // 3393 + SYS_MBRTOC16 = 0xD42 // 3394 + SYS_MBRTOC32 = 0xD43 // 3395 + SYS_QUANTEXPD32 = 0xD44 // 3396 + SYS_QUANTEXPD64 = 0xD45 // 3397 + SYS_QUANTEXPD128 = 0xD46 // 3398 + SYS___LOCALE_CTL = 0xD47 // 3399 + SYS___SMF_RECORD2 = 0xD48 // 3400 + SYS_FOPEN64 = 0xD49 // 3401 + SYS_FOPEN64_UNLOCKED = 0xD4A // 3402 + SYS_FREOPEN64 = 0xD4B // 3403 + SYS_FREOPEN64_UNLOCKED = 0xD4C // 3404 + SYS_TMPFILE64 = 0xD4D // 3405 + SYS_TMPFILE64_UNLOCKED = 0xD4E // 3406 + SYS_GETDATE64 = 0xD4F // 3407 + SYS_GETTIMEOFDAY64 = 0xD50 // 3408 + SYS_BIND2ADDRSEL = 0xD59 // 3417 + SYS_INET6_IS_SRCADDR = 0xD5A // 3418 + SYS___GETGRGID1 = 0xD5B // 3419 + SYS___GETGRNAM1 = 0xD5C // 3420 + SYS___FBUFSIZE = 0xD60 // 3424 + SYS___FPENDING = 0xD61 // 3425 + SYS___FLBF = 0xD62 // 3426 + SYS___FREADABLE = 0xD63 // 3427 + SYS___FWRITABLE = 0xD64 // 3428 + SYS___FREADING = 0xD65 // 3429 + SYS___FWRITING = 0xD66 // 3430 + SYS___FSETLOCKING = 0xD67 // 3431 + SYS__FLUSHLBF = 0xD68 // 3432 + SYS___FPURGE = 0xD69 // 3433 + SYS___FREADAHEAD = 0xD6A // 3434 + SYS___FSETERR = 0xD6B // 3435 + SYS___FPENDING_UNLOCKED = 0xD6C // 3436 + SYS___FREADING_UNLOCKED = 0xD6D // 3437 + SYS___FWRITING_UNLOCKED = 0xD6E // 3438 + SYS__FLUSHLBF_UNLOCKED = 0xD6F // 3439 + SYS___FPURGE_UNLOCKED = 0xD70 // 3440 + SYS___FREADAHEAD_UNLOCKED = 0xD71 // 3441 + SYS___LE_CEEGTJS = 0xD72 // 3442 + SYS___LE_RECORD_DUMP = 0xD73 // 3443 + SYS_FSTAT64 = 0xD74 // 3444 + SYS_LSTAT64 = 0xD75 // 3445 + SYS_STAT64 = 0xD76 // 3446 + SYS___READDIR2_64 = 0xD77 // 3447 + SYS___OPEN_STAT64 = 0xD78 // 3448 + SYS_FTW64 = 0xD79 // 3449 + SYS_NFTW64 = 0xD7A // 3450 + SYS_UTIME64 = 0xD7B // 3451 + SYS_UTIMES64 = 0xD7C // 3452 + SYS___GETIPC64 = 0xD7D // 3453 + SYS_MSGCTL64 = 0xD7E // 3454 + SYS_SEMCTL64 = 0xD7F // 3455 + SYS_SHMCTL64 = 0xD80 // 3456 + SYS_MSGXRCV64 = 0xD81 // 3457 + SYS___MGXR64 = 0xD81 // 3457 + SYS_W_GETPSENT64 = 0xD82 // 3458 + SYS_PTHREAD_COND_TIMEDWAIT64 = 0xD83 // 3459 + SYS_FTIME64 = 0xD85 // 3461 + SYS_GETUTXENT64 = 0xD86 // 3462 + SYS_GETUTXID64 = 0xD87 // 3463 + SYS_GETUTXLINE64 = 0xD88 // 3464 + SYS_PUTUTXLINE64 = 0xD89 // 3465 + SYS_NEWLOCALE = 0xD8A // 3466 + SYS_FREELOCALE = 0xD8B // 3467 + SYS_USELOCALE = 0xD8C // 3468 + SYS_DUPLOCALE = 0xD8D // 3469 + SYS___CHATTR64 = 0xD9C // 3484 + SYS___LCHATTR64 = 0xD9D // 3485 + SYS___FCHATTR64 = 0xD9E // 3486 + SYS_____CHATTR64_A = 0xD9F // 3487 + SYS_____LCHATTR64_A = 0xDA0 // 3488 + SYS___LE_CEEUSGD = 0xDA1 // 3489 + SYS___LE_IFAM_CON = 0xDA2 // 3490 + SYS___LE_IFAM_DSC = 0xDA3 // 3491 + SYS___LE_IFAM_GET = 0xDA4 // 3492 + SYS___LE_IFAM_QRY = 0xDA5 // 3493 + SYS_ALIGNED_ALLOC = 0xDA6 // 3494 + SYS_ACCEPT4 = 0xDA7 // 3495 + SYS___ACCEPT4_A = 0xDA8 // 3496 + SYS_COPYFILERANGE = 0xDA9 // 3497 + SYS_GETLINE = 0xDAA // 3498 + SYS___GETLINE_A = 0xDAB // 3499 + SYS_DIRFD = 0xDAC // 3500 + SYS_CLOCK_GETTIME = 
0xDAD // 3501 + SYS_DUP3 = 0xDAE // 3502 + SYS_EPOLL_CREATE = 0xDAF // 3503 + SYS_EPOLL_CREATE1 = 0xDB0 // 3504 + SYS_EPOLL_CTL = 0xDB1 // 3505 + SYS_EPOLL_WAIT = 0xDB2 // 3506 + SYS_EPOLL_PWAIT = 0xDB3 // 3507 + SYS_EVENTFD = 0xDB4 // 3508 + SYS_STATFS = 0xDB5 // 3509 + SYS___STATFS_A = 0xDB6 // 3510 + SYS_FSTATFS = 0xDB7 // 3511 + SYS_INOTIFY_INIT = 0xDB8 // 3512 + SYS_INOTIFY_INIT1 = 0xDB9 // 3513 + SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 + SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 + SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 + SYS_PIPE2 = 0xDBD // 3517 + SYS_PIVOT_ROOT = 0xDBE // 3518 + SYS___PIVOT_ROOT_A = 0xDBF // 3519 + SYS_PRCTL = 0xDC0 // 3520 + SYS_PRLIMIT = 0xDC1 // 3521 + SYS_SETHOSTNAME = 0xDC2 // 3522 + SYS___SETHOSTNAME_A = 0xDC3 // 3523 + SYS_SETRESUID = 0xDC4 // 3524 + SYS_SETRESGID = 0xDC5 // 3525 + SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 + SYS_FLOCK = 0xDC7 // 3527 + SYS_FGETXATTR = 0xDC8 // 3528 + SYS___FGETXATTR_A = 0xDC9 // 3529 + SYS_FLISTXATTR = 0xDCA // 3530 + SYS___FLISTXATTR_A = 0xDCB // 3531 + SYS_FREMOVEXATTR = 0xDCC // 3532 + SYS___FREMOVEXATTR_A = 0xDCD // 3533 + SYS_FSETXATTR = 0xDCE // 3534 + SYS___FSETXATTR_A = 0xDCF // 3535 + SYS_GETXATTR = 0xDD0 // 3536 + SYS___GETXATTR_A = 0xDD1 // 3537 + SYS_LGETXATTR = 0xDD2 // 3538 + SYS___LGETXATTR_A = 0xDD3 // 3539 + SYS_LISTXATTR = 0xDD4 // 3540 + SYS___LISTXATTR_A = 0xDD5 // 3541 + SYS_LLISTXATTR = 0xDD6 // 3542 + SYS___LLISTXATTR_A = 0xDD7 // 3543 + SYS_LREMOVEXATTR = 0xDD8 // 3544 + SYS___LREMOVEXATTR_A = 0xDD9 // 3545 + SYS_LSETXATTR = 0xDDA // 3546 + SYS___LSETXATTR_A = 0xDDB // 3547 + SYS_REMOVEXATTR = 0xDDC // 3548 + SYS___REMOVEXATTR_A = 0xDDD // 3549 + SYS_SETXATTR = 0xDDE // 3550 + SYS___SETXATTR_A = 0xDDF // 3551 + SYS_FDATASYNC = 0xDE0 // 3552 + SYS_SYNCFS = 0xDE1 // 3553 + SYS_FUTIMES = 0xDE2 // 3554 + SYS_FUTIMESAT = 0xDE3 // 3555 + SYS___FUTIMESAT_A = 0xDE4 // 3556 + SYS_LUTIMES = 0xDE5 // 3557 + SYS___LUTIMES_A = 0xDE6 // 3558 + SYS_INET_ATON = 0xDE7 // 3559 + SYS_GETRANDOM = 0xDE8 // 3560 + SYS_GETTID = 0xDE9 // 3561 + SYS_MEMFD_CREATE = 0xDEA // 3562 + SYS___MEMFD_CREATE_A = 0xDEB // 3563 + SYS_FACCESSAT = 0xDEC // 3564 + SYS___FACCESSAT_A = 0xDED // 3565 + SYS_FCHMODAT = 0xDEE // 3566 + SYS___FCHMODAT_A = 0xDEF // 3567 + SYS_FCHOWNAT = 0xDF0 // 3568 + SYS___FCHOWNAT_A = 0xDF1 // 3569 + SYS_FSTATAT = 0xDF2 // 3570 + SYS___FSTATAT_A = 0xDF3 // 3571 + SYS_LINKAT = 0xDF4 // 3572 + SYS___LINKAT_A = 0xDF5 // 3573 + SYS_MKDIRAT = 0xDF6 // 3574 + SYS___MKDIRAT_A = 0xDF7 // 3575 + SYS_MKFIFOAT = 0xDF8 // 3576 + SYS___MKFIFOAT_A = 0xDF9 // 3577 + SYS_MKNODAT = 0xDFA // 3578 + SYS___MKNODAT_A = 0xDFB // 3579 + SYS_OPENAT = 0xDFC // 3580 + SYS___OPENAT_A = 0xDFD // 3581 + SYS_READLINKAT = 0xDFE // 3582 + SYS___READLINKAT_A = 0xDFF // 3583 + SYS_RENAMEAT = 0xE00 // 3584 + SYS___RENAMEAT_A = 0xE01 // 3585 + SYS_RENAMEAT2 = 0xE02 // 3586 + SYS___RENAMEAT2_A = 0xE03 // 3587 + SYS_SYMLINKAT = 0xE04 // 3588 + SYS___SYMLINKAT_A = 0xE05 // 3589 + SYS_UNLINKAT = 0xE06 // 3590 + SYS___UNLINKAT_A = 0xE07 // 3591 + SYS_SYSINFO = 0xE08 // 3592 + SYS_WAIT4 = 0xE0A // 3594 + SYS_CLONE = 0xE0B // 3595 + SYS_UNSHARE = 0xE0C // 3596 + SYS_SETNS = 0xE0D // 3597 + SYS_CAPGET = 0xE0E // 3598 + SYS_CAPSET = 0xE0F // 3599 + SYS_STRCHRNUL = 0xE10 // 3600 + SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 + SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 + SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 + SYS___INET_ATON_A = 0xE15 // 3605 + SYS_MOUNT1 = 0xE16 // 3606 + SYS___MOUNT1_A = 0xE17 // 3607 + SYS_UMOUNT1 = 0xE18 // 3608 + SYS___UMOUNT1_A = 
0xE19 // 3609 + SYS_UMOUNT2 = 0xE1A // 3610 + SYS___UMOUNT2_A = 0xE1B // 3611 + SYS___PRCTL_A = 0xE1C // 3612 + SYS_LOCALTIME_R2 = 0xE1D // 3613 + SYS___LOCALTIME_R2_A = 0xE1E // 3614 + SYS_OPENAT2 = 0xE1F // 3615 + SYS___OPENAT2_A = 0xE20 // 3616 + SYS___LE_CEEMICT = 0xE21 // 3617 + SYS_GETENTROPY = 0xE22 // 3618 + SYS_NANOSLEEP = 0xE23 // 3619 + SYS_UTIMENSAT = 0xE24 // 3620 + SYS___UTIMENSAT_A = 0xE25 // 3621 + SYS_ASPRINTF = 0xE26 // 3622 + SYS___ASPRINTF_A = 0xE27 // 3623 + SYS_VASPRINTF = 0xE28 // 3624 + SYS___VASPRINTF_A = 0xE29 // 3625 + SYS_DPRINTF = 0xE2A // 3626 + SYS___DPRINTF_A = 0xE2B // 3627 + SYS_GETOPT_LONG = 0xE2C // 3628 + SYS___GETOPT_LONG_A = 0xE2D // 3629 + SYS_PSIGNAL = 0xE2E // 3630 + SYS___PSIGNAL_A = 0xE2F // 3631 + SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 + SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 + SYS_FSTATAT_O = 0xE32 // 3634 + SYS___FSTATAT_O_A = 0xE33 // 3635 + SYS_FSTATAT64 = 0xE34 // 3636 + SYS___FSTATAT64_A = 0xE35 // 3637 + SYS___CHATTRAT = 0xE36 // 3638 + SYS_____CHATTRAT_A = 0xE37 // 3639 + SYS___CHATTRAT64 = 0xE38 // 3640 + SYS_____CHATTRAT64_A = 0xE39 // 3641 + SYS_MADVISE = 0xE3A // 3642 + SYS___AUTHENTICATE = 0xE3B // 3643 + ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index dc0c955e..4740b834 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -836,6 +836,15 @@ const ( FSPICK_EMPTY_PATH = 0x8 FSMOUNT_CLOEXEC = 0x1 + + FSCONFIG_SET_FLAG = 0x0 + FSCONFIG_SET_STRING = 0x1 + FSCONFIG_SET_BINARY = 0x2 + FSCONFIG_SET_PATH = 0x3 + FSCONFIG_SET_PATH_EMPTY = 0x4 + FSCONFIG_SET_FD = 0x5 + FSCONFIG_CMD_CREATE = 0x6 + FSCONFIG_CMD_RECONFIGURE = 0x7 ) type OpenHow struct { @@ -1169,7 +1178,8 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 + PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1189,7 +1199,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 - PERF_SAMPLE_BRANCH_MAX = 0x80000 + PERF_SAMPLE_BRANCH_MAX = 0x100000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1550,6 +1560,7 @@ const ( IFLA_DEVLINK_PORT = 0x3e IFLA_GSO_IPV4_MAX_SIZE = 0x3f IFLA_GRO_IPV4_MAX_SIZE = 0x40 + IFLA_DPLL_PIN = 0x41 IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1565,6 +1576,7 @@ const ( IFLA_INET6_ICMP6STATS = 0x6 IFLA_INET6_TOKEN = 0x7 IFLA_INET6_ADDR_GEN_MODE = 0x8 + IFLA_INET6_RA_MTU = 0x9 IFLA_BR_UNSPEC = 0x0 IFLA_BR_FORWARD_DELAY = 0x1 IFLA_BR_HELLO_TIME = 0x2 @@ -1612,6 +1624,9 @@ const ( IFLA_BR_MCAST_MLD_VERSION = 0x2c IFLA_BR_VLAN_STATS_PER_PORT = 0x2d IFLA_BR_MULTI_BOOLOPT = 0x2e + IFLA_BR_MCAST_QUERIER_STATE = 0x2f + IFLA_BR_FDB_N_LEARNED = 0x30 + IFLA_BR_FDB_MAX_LEARNED = 0x31 IFLA_BRPORT_UNSPEC = 0x0 IFLA_BRPORT_STATE = 0x1 IFLA_BRPORT_PRIORITY = 0x2 @@ -1649,6 +1664,14 @@ const ( IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 IFLA_BRPORT_MRP_IN_OPEN = 0x24 + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT = 0x25 + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT = 0x26 + IFLA_BRPORT_LOCKED = 0x27 + IFLA_BRPORT_MAB = 0x28 + IFLA_BRPORT_MCAST_N_GROUPS = 0x29 + IFLA_BRPORT_MCAST_MAX_GROUPS = 0x2a + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS = 0x2b + IFLA_BRPORT_BACKUP_NHID = 
0x2c IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1670,6 +1693,9 @@ const ( IFLA_MACVLAN_MACADDR = 0x4 IFLA_MACVLAN_MACADDR_DATA = 0x5 IFLA_MACVLAN_MACADDR_COUNT = 0x6 + IFLA_MACVLAN_BC_QUEUE_LEN = 0x7 + IFLA_MACVLAN_BC_QUEUE_LEN_USED = 0x8 + IFLA_MACVLAN_BC_CUTOFF = 0x9 IFLA_VRF_UNSPEC = 0x0 IFLA_VRF_TABLE = 0x1 IFLA_VRF_PORT_UNSPEC = 0x0 @@ -1693,9 +1719,22 @@ const ( IFLA_XFRM_UNSPEC = 0x0 IFLA_XFRM_LINK = 0x1 IFLA_XFRM_IF_ID = 0x2 + IFLA_XFRM_COLLECT_METADATA = 0x3 IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 + IFLA_NETKIT_UNSPEC = 0x0 + IFLA_NETKIT_PEER_INFO = 0x1 + IFLA_NETKIT_PRIMARY = 0x2 + IFLA_NETKIT_POLICY = 0x3 + IFLA_NETKIT_PEER_POLICY = 0x4 + IFLA_NETKIT_MODE = 0x5 IFLA_VXLAN_UNSPEC = 0x0 IFLA_VXLAN_ID = 0x1 IFLA_VXLAN_GROUP = 0x2 @@ -1726,6 +1765,8 @@ const ( IFLA_VXLAN_GPE = 0x1b IFLA_VXLAN_TTL_INHERIT = 0x1c IFLA_VXLAN_DF = 0x1d + IFLA_VXLAN_VNIFILTER = 0x1e + IFLA_VXLAN_LOCALBYPASS = 0x1f IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1740,6 +1781,7 @@ const ( IFLA_GENEVE_LABEL = 0xb IFLA_GENEVE_TTL_INHERIT = 0xc IFLA_GENEVE_DF = 0xd + IFLA_GENEVE_INNER_PROTO_INHERIT = 0xe IFLA_BAREUDP_UNSPEC = 0x0 IFLA_BAREUDP_PORT = 0x1 IFLA_BAREUDP_ETHERTYPE = 0x2 @@ -1752,6 +1794,8 @@ const ( IFLA_GTP_FD1 = 0x2 IFLA_GTP_PDP_HASHSIZE = 0x3 IFLA_GTP_ROLE = 0x4 + IFLA_GTP_CREATE_SOCKETS = 0x5 + IFLA_GTP_RESTART_COUNT = 0x6 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1781,6 +1825,9 @@ const ( IFLA_BOND_AD_ACTOR_SYSTEM = 0x1a IFLA_BOND_TLB_DYNAMIC_LB = 0x1b IFLA_BOND_PEER_NOTIF_DELAY = 0x1c + IFLA_BOND_AD_LACP_ACTIVE = 0x1d + IFLA_BOND_MISSED_MAX = 0x1e + IFLA_BOND_NS_IP6_TARGET = 0x1f IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1796,6 +1843,7 @@ const ( IFLA_BOND_SLAVE_AD_AGGREGATOR_ID = 0x6 IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE = 0x7 IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE = 0x8 + IFLA_BOND_SLAVE_PRIO = 0x9 IFLA_VF_INFO_UNSPEC = 0x0 IFLA_VF_INFO = 0x1 IFLA_VF_UNSPEC = 0x0 @@ -1854,8 +1902,16 @@ const ( IFLA_STATS_LINK_XSTATS_SLAVE = 0x3 IFLA_STATS_LINK_OFFLOAD_XSTATS = 0x4 IFLA_STATS_AF_SPEC = 0x5 + IFLA_STATS_GETSET_UNSPEC = 0x0 + IFLA_STATS_GET_FILTERS = 0x1 + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS = 0x2 IFLA_OFFLOAD_XSTATS_UNSPEC = 0x0 IFLA_OFFLOAD_XSTATS_CPU_HIT = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO = 0x2 + IFLA_OFFLOAD_XSTATS_L3_STATS = 0x3 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC = 0x0 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST = 0x1 + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED = 0x2 IFLA_XDP_UNSPEC = 0x0 IFLA_XDP_FD = 0x1 IFLA_XDP_ATTACHED = 0x2 @@ -1885,6 +1941,11 @@ const ( IFLA_RMNET_UNSPEC = 0x0 IFLA_RMNET_MUX_ID = 0x1 IFLA_RMNET_FLAGS = 0x2 + IFLA_MCTP_UNSPEC = 0x0 + IFLA_MCTP_NET = 0x1 + IFLA_DSA_UNSPEC = 0x0 + IFLA_DSA_CONDUIT = 0x1 + IFLA_DSA_MASTER = 0x1 ) const ( @@ -2421,6 +2482,15 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Chunk_size uint32 + Headroom uint32 + Flags uint32 + Tx_metadata_len uint32 +} + type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2875,7 +2945,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd + BPF_TCP_MAX_STATES = 0xe TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -3151,7 +3221,7 @@ const ( 
DEVLINK_CMD_LINECARD_NEW = 0x50 DEVLINK_CMD_LINECARD_DEL = 0x51 DEVLINK_CMD_SELFTESTS_GET = 0x52 - DEVLINK_CMD_MAX = 0x53 + DEVLINK_CMD_MAX = 0x54 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -4535,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x146 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4801,7 +4871,7 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x16 + NL80211_BSS_MAX = 0x18 NL80211_BSS_MLD_ADDR = 0x16 NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 @@ -4905,7 +4975,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9a + NL80211_CMD_MAX = 0x9b NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5139,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1c + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5633,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -5931,3 +6001,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 438a30af..fd402da4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -477,14 +477,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index adceca35..eb7a5e18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -492,15 +492,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index eeaa00a3..d78ac108 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -470,15 +470,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6739aa91..cd06d47f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -471,15 +471,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 9920ef63..2f28fe26 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -472,15 +472,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2923b799..71d6cac2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ce2750ee..8596d453 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 3038811d..cd60ea18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index efc6fed1..b0ae420c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 9a654b75..83597287 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -482,15 +482,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 40d358e3..69eb6a5c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 148c6ceb..5f583cb6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 72ba8154..15adc041 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -499,15 +499,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 71e76550..cf3ce900 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -495,15 +495,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 4abbdb9d..590b5673 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 54f31be6..d9a13af4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -25,10 +25,13 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 + SizeofInet4Pktinfo = 8 + SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 + SizeofUcred = 12 ) type ( @@ -69,12 +72,17 @@ type Utimbuf struct { } type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - 
Version [65]byte - Machine [65]byte - Domainname [65]byte + Sysname [16]byte + Nodename [32]byte + Release [8]byte + Version [8]byte + Machine [16]byte +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 } type RawSockaddrInet4 struct { @@ -325,7 +333,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint32 + Type uint64 Bsize uint64 Blocks uint64 Bfree uint64 @@ -336,6 +344,7 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 + _ [4]uint64 } type direntLE struct { @@ -412,3 +421,126 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name string +} + +const ( + SizeofInotifyEvent = 0x10 +) + +type ConsMsg2 struct { + Cm2Format uint16 + Cm2R1 uint16 + Cm2Msglength uint32 + Cm2Msg *byte + Cm2R2 [4]byte + Cm2R3 [4]byte + Cm2Routcde *uint32 + Cm2Descr *uint32 + Cm2Msgflag uint32 + Cm2Token uint32 + Cm2Msgid *uint32 + Cm2R4 [4]byte + Cm2DomToken uint32 + Cm2DomMsgid *uint32 + Cm2ModCartptr *byte + Cm2ModConsidptr *byte + Cm2MsgCart [8]byte + Cm2MsgConsid [4]byte + Cm2R5 [12]byte +} + +const ( + CC_modify = 1 + CC_stop = 2 + CONSOLE_FORMAT_2 = 2 + CONSOLE_FORMAT_3 = 3 + CONSOLE_HRDCPY = 0x80000000 +) + +type OpenHow struct { + Flags uint64 + Mode uint64 + Resolve uint64 +} + +const SizeofOpenHow = 0x18 + +const ( + RESOLVE_CACHED = 0x20 + RESOLVE_BENEATH = 0x8 + RESOLVE_IN_ROOT = 0x10 + RESOLVE_NO_MAGICLINKS = 0x2 + RESOLVE_NO_SYMLINKS = 0x4 + RESOLVE_NO_XDEV = 0x1 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + _ [44]byte +} + +type SysvIpcPerm struct { + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode int32 +} + +type SysvShmDesc struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ uint8 + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime Time_t + Dtime Time_t + Ctime Time_t +} + +type SysvShmDesc64 struct { + Perm SysvIpcPerm + _ [4]byte + Lpid int32 + Cpid int32 + Nattch uint32 + _ [4]byte + _ [4]byte + _ [4]byte + _ int32 + _ byte + _ uint8 + _ uint16 + _ *byte + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 +} diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index ce2d713d..16f90560 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build windows && go1.9 +//go:build windows package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index ba64caca..00000000 --- a/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). 
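Note (illustrative, not part of the diff): the per-architecture XDPUmemReg definitions removed in the hunks above are replaced by the single definition added to ztypes_linux.go earlier in this update, which renames the old Size field to Chunk_size and adds Tx_metadata_len. A minimal sketch of the consolidated struct is shown below; the values are placeholders only — in a real AF_XDP setup Addr/Len would describe a mmap'd UMEM region handed to setsockopt with XDP_UMEM_REG.

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Field names exactly as added to ztypes_linux.go in this update.
	reg := unix.XDPUmemReg{
		Addr:            0,       // start address of the UMEM area (placeholder)
		Len:             1 << 22, // size of the UMEM area in bytes (placeholder)
		Chunk_size:      2048,    // per-frame chunk size
		Headroom:        0,
		Flags:           0,
		Tx_metadata_len: 0, // new field in this revision of the struct
	}
	fmt.Printf("%+v\n", reg)
}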
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 26be94a8..6f7d2ac7 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -68,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031..6525c62f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, 
lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6..d8cb71db 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82..9f73df75 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = 
modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -389,6 +401,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -1641,6 +1654,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1874,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) 
(err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1894,14 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2103,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2871,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2993,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3082,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3145,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3238,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3392,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { @@ -3378,6 +3487,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { diff --git a/vendor/golang.org/x/text/cases/cases.go b/vendor/golang.org/x/text/cases/cases.go new file mode 100644 index 00000000..752cdf03 --- /dev/null +++ b/vendor/golang.org/x/text/cases/cases.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_trieval.go + +// Package cases provides general and language-specific case mappers. +package cases // import "golang.org/x/text/cases" + +import ( + "golang.org/x/text/language" + "golang.org/x/text/transform" +) + +// References: +// - Unicode Reference Manual Chapter 3.13, 4.2, and 5.18. 
+// - https://www.unicode.org/reports/tr29/ +// - https://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt +// - https://www.unicode.org/Public/6.3.0/ucd/SpecialCasing.txt +// - https://www.unicode.org/Public/6.3.0/ucd/DerivedCoreProperties.txt +// - https://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt +// - https://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakTest.txt +// - http://userguide.icu-project.org/transforms/casemappings + +// TODO: +// - Case folding +// - Wide and Narrow? +// - Segmenter option for title casing. +// - ASCII fast paths +// - Encode Soft-Dotted property within trie somehow. + +// A Caser transforms given input to a certain case. It implements +// transform.Transformer. +// +// A Caser may be stateful and should therefore not be shared between +// goroutines. +type Caser struct { + t transform.SpanningTransformer +} + +// Bytes returns a new byte slice with the result of converting b to the case +// form implemented by c. +func (c Caser) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(c.t, b) + return b +} + +// String returns a string with the result of transforming s to the case form +// implemented by c. +func (c Caser) String(s string) string { + s, _, _ = transform.String(c.t, s) + return s +} + +// Reset resets the Caser to be reused for new input after a previous call to +// Transform. +func (c Caser) Reset() { c.t.Reset() } + +// Transform implements the transform.Transformer interface and transforms the +// given input to the case form implemented by c. +func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return c.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (c Caser) Span(src []byte, atEOF bool) (n int, err error) { + return c.t.Span(src, atEOF) +} + +// Upper returns a Caser for language-specific uppercasing. +func Upper(t language.Tag, opts ...Option) Caser { + return Caser{makeUpper(t, getOpts(opts...))} +} + +// Lower returns a Caser for language-specific lowercasing. +func Lower(t language.Tag, opts ...Option) Caser { + return Caser{makeLower(t, getOpts(opts...))} +} + +// Title returns a Caser for language-specific title casing. It uses an +// approximation of the default Unicode Word Break algorithm. +func Title(t language.Tag, opts ...Option) Caser { + return Caser{makeTitle(t, getOpts(opts...))} +} + +// Fold returns a Caser that implements Unicode case folding. The returned Caser +// is stateless and safe to use concurrently by multiple goroutines. +// +// Case folding does not normalize the input and may not preserve a normal form. +// Use the collate or search package for more convenient and linguistically +// sound comparisons. Use golang.org/x/text/secure/precis for string comparisons +// where security aspects are a concern. +func Fold(opts ...Option) Caser { + return Caser{makeFold(getOpts(opts...))} +} + +// An Option is used to modify the behavior of a Caser. +type Option func(o options) options + +// TODO: consider these options to take a boolean as well, like FinalSigma. +// The advantage of using this approach is that other providers of a lower-case +// algorithm could set different defaults by prefixing a user-provided slice +// of options with their own. This is handy, for instance, for the precis +// package which would override the default to not handle the Greek final sigma. + +var ( + // NoLower disables the lowercasing of non-leading letters for a title + // caser. 
+ NoLower Option = noLower + + // Compact omits mappings in case folding for characters that would grow the + // input. (Unimplemented.) + Compact Option = compact +) + +// TODO: option to preserve a normal form, if applicable? + +type options struct { + noLower bool + simple bool + + // TODO: segmenter, max ignorable, alternative versions, etc. + + ignoreFinalSigma bool +} + +func getOpts(o ...Option) (res options) { + for _, f := range o { + res = f(res) + } + return +} + +func noLower(o options) options { + o.noLower = true + return o +} + +func compact(o options) options { + o.simple = true + return o +} + +// HandleFinalSigma specifies whether the special handling of Greek final sigma +// should be enabled. Unicode prescribes handling the Greek final sigma for all +// locales, but standards like IDNA and PRECIS override this default. +func HandleFinalSigma(enable bool) Option { + if enable { + return handleFinalSigma + } + return ignoreFinalSigma +} + +func ignoreFinalSigma(o options) options { + o.ignoreFinalSigma = true + return o +} + +func handleFinalSigma(o options) options { + o.ignoreFinalSigma = false + return o +} diff --git a/vendor/golang.org/x/text/cases/context.go b/vendor/golang.org/x/text/cases/context.go new file mode 100644 index 00000000..e9aa9e19 --- /dev/null +++ b/vendor/golang.org/x/text/cases/context.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +// A context is used for iterating over source bytes, fetching case info and +// writing to a destination buffer. +// +// Casing operations may need more than one rune of context to decide how a rune +// should be cased. Casing implementations should call checkpoint on context +// whenever it is known to be safe to return the runes processed so far. +// +// It is recommended for implementations to not allow for more than 30 case +// ignorables as lookahead (analogous to the limit in norm) and to use state if +// unbounded lookahead is needed for cased runes. +type context struct { + dst, src []byte + atEOF bool + + pDst int // pDst points past the last written rune in dst. + pSrc int // pSrc points to the start of the currently scanned rune. + + // checkpoints safe to return in Transform, where nDst <= pDst and nSrc <= pSrc. + nDst, nSrc int + err error + + sz int // size of current rune + info info // case information of currently scanned rune + + // State preserved across calls to Transform. + isMidWord bool // false if next cased letter needs to be title-cased. +} + +func (c *context) Reset() { + c.isMidWord = false +} + +// ret returns the return values for the Transform method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. +func (c *context) ret() (nDst, nSrc int, err error) { + if c.err != nil || c.nSrc == len(c.src) { + return c.nDst, c.nSrc, c.err + } + // This point is only reached by mappers if there was no short destination + // buffer. This means that the source buffer was exhausted and that c.sz was + // set to 0 by next. + if c.atEOF && c.pSrc == len(c.src) { + return c.pDst, c.pSrc, nil + } + return c.nDst, c.nSrc, transform.ErrShortSrc +} + +// retSpan returns the return values for the Span method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. 
+func (c *context) retSpan() (n int, err error) { + _, nSrc, err := c.ret() + return nSrc, err +} + +// checkpoint sets the return value buffer points for Transform to the current +// positions. +func (c *context) checkpoint() { + if c.err == nil { + c.nDst, c.nSrc = c.pDst, c.pSrc+c.sz + } +} + +// unreadRune causes the last rune read by next to be reread on the next +// invocation of next. Only one unreadRune may be called after a call to next. +func (c *context) unreadRune() { + c.sz = 0 +} + +func (c *context) next() bool { + c.pSrc += c.sz + if c.pSrc == len(c.src) || c.err != nil { + c.info, c.sz = 0, 0 + return false + } + v, sz := trie.lookup(c.src[c.pSrc:]) + c.info, c.sz = info(v), sz + if c.sz == 0 { + if c.atEOF { + // A zero size means we have an incomplete rune. If we are atEOF, + // this means it is an illegal rune, which we will consume one + // byte at a time. + c.sz = 1 + } else { + c.err = transform.ErrShortSrc + return false + } + } + return true +} + +// writeBytes adds bytes to dst. +func (c *context) writeBytes(b []byte) bool { + if len(c.dst)-c.pDst < len(b) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for _, ch := range b { + c.dst[c.pDst] = ch + c.pDst++ + } + return true +} + +// writeString writes the given string to dst. +func (c *context) writeString(s string) bool { + if len(c.dst)-c.pDst < len(s) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for i := 0; i < len(s); i++ { + c.dst[c.pDst] = s[i] + c.pDst++ + } + return true +} + +// copy writes the current rune to dst. +func (c *context) copy() bool { + return c.writeBytes(c.src[c.pSrc : c.pSrc+c.sz]) +} + +// copyXOR copies the current rune to dst and modifies it by applying the XOR +// pattern of the case info. It is the responsibility of the caller to ensure +// that this is a rune with a XOR pattern defined. +func (c *context) copyXOR() bool { + if !c.copy() { + return false + } + if c.info&xorIndexBit == 0 { + // Fast path for 6-bit XOR pattern, which covers most cases. + c.dst[c.pDst-1] ^= byte(c.info >> xorShift) + } else { + // Interpret XOR bits as an index. + // TODO: test performance for unrolling this loop. Verify that we have + // at least two bytes and at most three. + idx := c.info >> xorShift + for p := c.pDst - 1; ; p-- { + c.dst[p] ^= xorData[idx] + idx-- + if xorData[idx] == 0 { + break + } + } + } + return true +} + +// hasPrefix returns true if src[pSrc:] starts with the given string. +func (c *context) hasPrefix(s string) bool { + b := c.src[c.pSrc:] + if len(b) < len(s) { + return false + } + for i, c := range b[:len(s)] { + if c != s[i] { + return false + } + } + return true +} + +// caseType returns an info with only the case bits, normalized to either +// cLower, cUpper, cTitle or cUncased. +func (c *context) caseType() info { + cm := c.info & 0x7 + if cm < 4 { + return cm + } + if cm >= cXORCase { + // xor the last bit of the rune with the case type bits. + b := c.src[c.pSrc+c.sz-1] + return info(b&1) ^ cm&0x3 + } + if cm == cIgnorableCased { + return cLower + } + return cUncased +} + +// lower writes the lowercase version of the current rune to dst. 
+func lower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + return c.writeString(e[offset : offset+nLower]) + } + return c.copy() +} + +func isLower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// upper writes the uppercase version of the current rune to dst. +func upper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + // The first special case mapping is for lower. Set n to the second. + if n == noChange { + n = 0 + } + n, e = e[1]&lengthMask, e[n:] + } + if n != noChange { + return c.writeString(e[offset : offset+n]) + } + return c.copy() +} + +// isUpper writes the isUppercase version of the current rune to dst. +func isUpper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + n = e[1] & lengthMask + } + if n != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// title writes the title case version of the current rune to dst. +func title(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return c.copy() + } + if c.info&exceptionBit == 0 { + if ct == cLower { + return c.copyXOR() + } + return c.copy() + } + // Get the exception data. + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + + nFirst := (e[1] >> lengthBits) & lengthMask + if nTitle := e[1] & lengthMask; nTitle != noChange { + if nFirst != noChange { + e = e[nFirst:] + } + return c.writeString(e[offset : offset+nTitle]) + } + if ct == cLower && nFirst != noChange { + // Use the uppercase version instead. + return c.writeString(e[offset : offset+nFirst]) + } + // Already in correct case. + return c.copy() +} + +// isTitle reports whether the current rune is in title case. +func isTitle(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return true + } + if c.info&exceptionBit == 0 { + if ct == cLower { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + // Get the exception data. 
+ e := exceptions[c.info>>exceptionShift:] + if nTitle := e[1] & lengthMask; nTitle != noChange { + c.err = transform.ErrEndOfSpan + return false + } + nFirst := (e[1] >> lengthBits) & lengthMask + if ct == cLower && nFirst != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// foldFull writes the foldFull version of the current rune to dst. +func foldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return c.copy() + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + return c.copyXOR() + } + return c.copy() + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 { + if ct == cLower { + return c.copy() + } + n = (e[1] >> lengthBits) & lengthMask + } + return c.writeString(e[2 : 2+n]) +} + +// isFoldFull reports whether the current run is mapped to foldFull +func isFoldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return true + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 && ct == cLower { + return true + } + c.err = transform.ErrEndOfSpan + return false +} diff --git a/vendor/golang.org/x/text/cases/fold.go b/vendor/golang.org/x/text/cases/fold.go new file mode 100644 index 00000000..85cc434f --- /dev/null +++ b/vendor/golang.org/x/text/cases/fold.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +type caseFolder struct{ transform.NopResetter } + +// caseFolder implements the Transformer interface for doing case folding. +func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + foldFull(&c) + c.checkpoint() + } + return c.ret() +} + +func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isFoldFull(&c) { + c.checkpoint() + } + return c.retSpan() +} + +func makeFold(o options) transform.SpanningTransformer { + // TODO: Special case folding, through option Language, Special/Turkic, or + // both. + // TODO: Implement Compact options. + return &caseFolder{} +} diff --git a/vendor/golang.org/x/text/cases/icu.go b/vendor/golang.org/x/text/cases/icu.go new file mode 100644 index 00000000..db7c237c --- /dev/null +++ b/vendor/golang.org/x/text/cases/icu.go @@ -0,0 +1,61 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build icu + +package cases + +// Ideally these functions would be defined in a test file, but go test doesn't +// allow CGO in tests. The build tag should ensure either way that these +// functions will not end up in the package. + +// TODO: Ensure that the correct ICU version is set. 
+ +/* +#cgo LDFLAGS: -licui18n.57 -licuuc.57 +#include +#include +#include +#include +#include +*/ +import "C" + +import "unsafe" + +func doICU(tag, caser, input string) string { + err := C.UErrorCode(0) + loc := C.CString(tag) + cm := C.ucasemap_open(loc, C.uint32_t(0), &err) + + buf := make([]byte, len(input)*4) + dst := (*C.char)(unsafe.Pointer(&buf[0])) + src := C.CString(input) + + cn := C.int32_t(0) + + switch caser { + case "fold": + cn = C.ucasemap_utf8FoldCase(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "lower": + cn = C.ucasemap_utf8ToLower(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "upper": + cn = C.ucasemap_utf8ToUpper(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "title": + cn = C.ucasemap_utf8ToTitle(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + } + return string(buf[:cn]) +} diff --git a/vendor/golang.org/x/text/cases/info.go b/vendor/golang.org/x/text/cases/info.go new file mode 100644 index 00000000..87a7c3e9 --- /dev/null +++ b/vendor/golang.org/x/text/cases/info.go @@ -0,0 +1,82 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +func (c info) cccVal() info { + if c&exceptionBit != 0 { + return info(exceptions[c>>exceptionShift]) & cccMask + } + return c & cccMask +} + +func (c info) cccType() info { + ccc := c.cccVal() + if ccc <= cccZero { + return cccZero + } + return ccc +} + +// TODO: Implement full Unicode breaking algorithm: +// 1) Implement breaking in separate package. +// 2) Use the breaker here. +// 3) Compare table size and performance of using the more generic breaker. +// +// Note that we can extend the current algorithm to be much more accurate. This +// only makes sense, though, if the performance and/or space penalty of using +// the generic breaker is big. Extra data will only be needed for non-cased +// runes, which means there are sufficient bits left in the caseType. +// ICU prohibits breaking in such cases as well. + +// For the purpose of title casing we use an approximation of the Unicode Word +// Breaking algorithm defined in Annex #29: +// https://www.unicode.org/reports/tr29/#Default_Grapheme_Cluster_Table. +// +// For our approximation, we group the Word Break types into the following +// categories, with associated rules: +// +// 1) Letter: +// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ. +// Rule: Never break between consecutive runes of this category. +// +// 2) Mid: +// MidLetter, MidNumLet, Single_Quote. +// (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn, +// Me, Cf, Lm or Sk). +// Rule: Don't break between Letter and Mid, but break between two Mids. +// +// 3) Break: +// Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and +// Other. +// These categories should always result in a break between two cased letters. +// Rule: Always break. +// +// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in +// preventing a break between two cased letters. For now we will ignore this +// (e.g. [ALetter] [ExtendNumLet] [Katakana] [ExtendNumLet] [ALetter] and +// [ALetter] [Numeric] [MidNum] [Numeric] [ALetter].) +// +// Note 2: the rule for Mid is very approximate, but works in most cases. To +// improve, we could store the categories in the trie value and use a FA to +// manage breaks. 
See TODO comment above. +// +// Note 3: according to the spec, it is possible for the Extend category to +// introduce breaks between other categories grouped in Letter. However, this +// is undesirable for our purposes. ICU prevents breaks in such cases as well. + +// isBreak returns whether this rune should introduce a break. +func (c info) isBreak() bool { + return c.cccVal() == cccBreak +} + +// isLetter returns whether the rune is of break type ALetter, Hebrew_Letter, +// Numeric, ExtendNumLet, or Extend. +func (c info) isLetter() bool { + ccc := c.cccVal() + if ccc == cccZero { + return !c.isCaseIgnorable() + } + return ccc != cccBreak +} diff --git a/vendor/golang.org/x/text/cases/map.go b/vendor/golang.org/x/text/cases/map.go new file mode 100644 index 00000000..0f7c6a14 --- /dev/null +++ b/vendor/golang.org/x/text/cases/map.go @@ -0,0 +1,816 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +// This file contains the definitions of case mappings for all supported +// languages. The rules for the language-specific tailorings were taken and +// modified from the CLDR transform definitions in common/transforms. + +import ( + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/text/internal" + "golang.org/x/text/language" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// A mapFunc takes a context set to the current rune and writes the mapped +// version to the same context. It may advance the context to the next rune. It +// returns whether a checkpoint is possible: whether the pDst bytes written to +// dst so far won't need changing as we see more source bytes. +type mapFunc func(*context) bool + +// A spanFunc takes a context set to the current rune and returns whether this +// rune would be altered when written to the output. It may advance the context +// to the next rune. It returns whether a checkpoint is possible. +type spanFunc func(*context) bool + +// maxIgnorable defines the maximum number of ignorables to consider for +// lookahead operations. +const maxIgnorable = 30 + +// supported lists the language tags for which we have tailorings. +const supported = "und af az el lt nl tr" + +func init() { + tags := []language.Tag{} + for _, s := range strings.Split(supported, " ") { + tags = append(tags, language.MustParse(s)) + } + matcher = internal.NewInheritanceMatcher(tags) + Supported = language.NewCoverage(tags) +} + +var ( + matcher *internal.InheritanceMatcher + + Supported language.Coverage + + // We keep the following lists separate, instead of having a single per- + // language struct, to give the compiler a chance to remove unused code. + + // Some uppercase mappers are stateless, so we can precompute the + // Transformers and save a bit on runtime allocations. 
+ upperFunc = []struct { + upper mapFunc + span spanFunc + }{ + {nil, nil}, // und + {nil, nil}, // af + {aztrUpper(upper), isUpper}, // az + {elUpper, noSpan}, // el + {ltUpper(upper), noSpan}, // lt + {nil, nil}, // nl + {aztrUpper(upper), isUpper}, // tr + } + + undUpper transform.SpanningTransformer = &undUpperCaser{} + undLower transform.SpanningTransformer = &undLowerCaser{} + undLowerIgnoreSigma transform.SpanningTransformer = &undLowerIgnoreSigmaCaser{} + + lowerFunc = []mapFunc{ + nil, // und + nil, // af + aztrLower, // az + nil, // el + ltLower, // lt + nil, // nl + aztrLower, // tr + } + + titleInfos = []struct { + title mapFunc + lower mapFunc + titleSpan spanFunc + rewrite func(*context) + }{ + {title, lower, isTitle, nil}, // und + {title, lower, isTitle, afnlRewrite}, // af + {aztrUpper(title), aztrLower, isTitle, nil}, // az + {title, lower, isTitle, nil}, // el + {ltUpper(title), ltLower, noSpan, nil}, // lt + {nlTitle, lower, nlTitleSpan, afnlRewrite}, // nl + {aztrUpper(title), aztrLower, isTitle, nil}, // tr + } +) + +func makeUpper(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := upperFunc[i].upper + if f == nil { + return undUpper + } + return &simpleCaser{f: f, span: upperFunc[i].span} +} + +func makeLower(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := lowerFunc[i] + if f == nil { + if o.ignoreFinalSigma { + return undLowerIgnoreSigma + } + return undLower + } + if o.ignoreFinalSigma { + return &simpleCaser{f: f, span: isLower} + } + return &lowerCaser{ + first: f, + midWord: finalSigma(f), + } +} + +func makeTitle(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + x := &titleInfos[i] + lower := x.lower + if o.noLower { + lower = (*context).copy + } else if !o.ignoreFinalSigma { + lower = finalSigma(lower) + } + return &titleCaser{ + title: x.title, + lower: lower, + titleSpan: x.titleSpan, + rewrite: x.rewrite, + } +} + +func noSpan(c *context) bool { + c.err = transform.ErrEndOfSpan + return false +} + +// TODO: consider a similar special case for the fast majority lower case. This +// is a bit more involved so will require some more precise benchmarking to +// justify it. + +type undUpperCaser struct{ transform.NopResetter } + +// undUpperCaser implements the Transformer interface for doing an upper case +// mapping for the root locale (und). It eliminates the need for an allocation +// as it prevents escaping by not using function pointers. +func (t undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + upper(&c) + c.checkpoint() + } + return c.ret() +} + +func (t undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isUpper(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// undLowerIgnoreSigmaCaser implements the Transformer interface for doing +// a lower case mapping for the root locale (und) ignoring final sigma +// handling. This casing algorithm is used in some performance-critical packages +// like secure/precis and x/net/http/idna, which warrants its special-casing. 
+type undLowerIgnoreSigmaCaser struct{ transform.NopResetter } + +func (t undLowerIgnoreSigmaCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() && lower(&c) { + c.checkpoint() + } + return c.ret() + +} + +// Span implements a generic lower-casing. This is possible as isLower works +// for all lowercasing variants. All lowercase variants only vary in how they +// transform a non-lowercase letter. They will never change an already lowercase +// letter. In addition, there is no state. +func (t undLowerIgnoreSigmaCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isLower(&c) { + c.checkpoint() + } + return c.retSpan() +} + +type simpleCaser struct { + context + f mapFunc + span spanFunc +} + +// simpleCaser implements the Transformer interface for doing a case operation +// on a rune-by-rune basis. +func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() && t.f(&c) { + c.checkpoint() + } + return c.ret() +} + +func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && t.span(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// undLowerCaser implements the Transformer interface for doing a lower case +// mapping for the root locale (und) ignoring final sigma handling. This casing +// algorithm is used in some performance-critical packages like secure/precis +// and x/net/http/idna, which warrants its special-casing. +type undLowerCaser struct{ transform.NopResetter } + +func (t undLowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + + for isInterWord := true; c.next(); { + if isInterWord { + if c.info.isCased() { + if !lower(&c) { + break + } + isInterWord = false + } else if !c.copy() { + break + } + } else { + if c.info.isNotCasedAndNotCaseIgnorable() { + if !c.copy() { + break + } + isInterWord = true + } else if !c.hasPrefix("Σ") { + if !lower(&c) { + break + } + } else if !finalSigmaBody(&c) { + break + } + } + c.checkpoint() + } + return c.ret() +} + +func (t undLowerCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isLower(&c) { + c.checkpoint() + } + return c.retSpan() +} + +// lowerCaser implements the Transformer interface. The default Unicode lower +// casing requires different treatment for the first and subsequent characters +// of a word, most notably to handle the Greek final Sigma. +type lowerCaser struct { + undLowerIgnoreSigmaCaser + + context + + first, midWord mapFunc +} + +func (t *lowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF} + c := &t.context + + for isInterWord := true; c.next(); { + if isInterWord { + if c.info.isCased() { + if !t.first(c) { + break + } + isInterWord = false + } else if !c.copy() { + break + } + } else { + if c.info.isNotCasedAndNotCaseIgnorable() { + if !c.copy() { + break + } + isInterWord = true + } else if !t.midWord(c) { + break + } + } + c.checkpoint() + } + return c.ret() +} + +// titleCaser implements the Transformer interface. Title casing algorithms +// distinguish between the first letter of a word and subsequent letters of the +// same word. 
It uses state to avoid requiring a potentially infinite lookahead. +type titleCaser struct { + context + + // rune mappings used by the actual casing algorithms. + title mapFunc + lower mapFunc + titleSpan spanFunc + + rewrite func(*context) +} + +// Transform implements the standard Unicode title case algorithm as defined in +// Chapter 3 of The Unicode Standard: +// toTitlecase(X): Find the word boundaries in X according to Unicode Standard +// Annex #29, "Unicode Text Segmentation." For each word boundary, find the +// first cased character F following the word boundary. If F exists, map F to +// Titlecase_Mapping(F); then map all characters C between F and the following +// word boundary to Lowercase_Mapping(C). +func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.ret() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.title(c) { + break + } + c.isMidWord = true + } else if !t.lower(c) { + break + } + } else if !c.copy() { + break + } else if p.isBreak() { + c.isMidWord = false + } + + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.ret() +} + +func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) { + t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.retSpan() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.titleSpan(c) { + break + } + c.isMidWord = true + } else if !isLower(c) { + break + } + } else if p.isBreak() { + c.isMidWord = false + } + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.retSpan() +} + +// finalSigma adds Greek final Sigma handing to another casing function. It +// determines whether a lowercased sigma should be σ or ς, by looking ahead for +// case-ignorables and a cased letters. +func finalSigma(f mapFunc) mapFunc { + return func(c *context) bool { + if !c.hasPrefix("Σ") { + return f(c) + } + return finalSigmaBody(c) + } +} + +func finalSigmaBody(c *context) bool { + // Current rune must be ∑. + + // ::NFD(); + // # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA + // Σ } [:case-ignorable:]* [:cased:] → σ; + // [:cased:] [:case-ignorable:]* { Σ → ς; + // ::Any-Lower; + // ::NFC(); + + p := c.pDst + c.writeString("ς") + + // TODO: we should do this here, but right now this will never have an + // effect as this is called when the prefix is Sigma, whereas Dutch and + // Afrikaans only test for an apostrophe. + // + // if t.rewrite != nil { + // t.rewrite(c) + // } + + // We need to do one more iteration after maxIgnorable, as a cased + // letter is not an ignorable and may modify the result. 
+ wasMid := false + for i := 0; i < maxIgnorable+1; i++ { + if !c.next() { + return false + } + if !c.info.isCaseIgnorable() { + // All Midword runes are also case ignorable, so we are + // guaranteed to have a letter or word break here. As we are + // unreading the run, there is no need to unset c.isMidWord; + // the title caser will handle this. + if c.info.isCased() { + // p+1 is guaranteed to be in bounds: if writing ς was + // successful, p+1 will contain the second byte of ς. If not, + // this function will have returned after c.next returned false. + c.dst[p+1]++ // ς → σ + } + c.unreadRune() + return true + } + // A case ignorable may also introduce a word break, so we may need + // to continue searching even after detecting a break. + isMid := c.info.isMid() + if (wasMid && isMid) || c.info.isBreak() { + c.isMidWord = false + } + wasMid = isMid + c.copy() + } + return true +} + +// finalSigmaSpan would be the same as isLower. + +// elUpper implements Greek upper casing, which entails removing a predefined +// set of non-blocked modifiers. Note that these accents should not be removed +// for title casing! +// Example: "Οδός" -> "ΟΔΟΣ". +func elUpper(c *context) bool { + // From CLDR: + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Above:]]*? { [\u0313\u0314\u0301\u0300\u0306\u0342\u0308\u0304] → ; + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Iota_Subscript:]]*? { \u0345 → ; + + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !upper(c) { + return false + } + if !unicode.Is(unicode.Greek, r) { + return true + } + i := 0 + // Take the properties of the uppercased rune that is already written to the + // destination. This saves us the trouble of having to uppercase the + // decomposed rune again. + if b := norm.NFD.Properties(c.dst[oldPDst:]).Decomposition(); b != nil { + // Restore the destination position and process the decomposed rune. + r, sz := utf8.DecodeRune(b) + if r <= 0xFF { // See A.6.1 + return true + } + c.pDst = oldPDst + // Insert the first rune and ignore the modifiers. See A.6.2. + c.writeBytes(b[:sz]) + i = len(b[sz:]) / 2 // Greek modifiers are always of length 2. + } + + for ; i < maxIgnorable && c.next(); i++ { + switch r, _ := utf8.DecodeRune(c.src[c.pSrc:]); r { + // Above and Iota Subscript + case 0x0300, // U+0300 COMBINING GRAVE ACCENT + 0x0301, // U+0301 COMBINING ACUTE ACCENT + 0x0304, // U+0304 COMBINING MACRON + 0x0306, // U+0306 COMBINING BREVE + 0x0308, // U+0308 COMBINING DIAERESIS + 0x0313, // U+0313 COMBINING COMMA ABOVE + 0x0314, // U+0314 COMBINING REVERSED COMMA ABOVE + 0x0342, // U+0342 COMBINING GREEK PERISPOMENI + 0x0345: // U+0345 COMBINING GREEK YPOGEGRAMMENI + // No-op. Gobble the modifier. + + default: + switch v, _ := trie.lookup(c.src[c.pSrc:]); info(v).cccType() { + case cccZero: + c.unreadRune() + return true + + // We don't need to test for IotaSubscript as the only rune that + // qualifies (U+0345) was already excluded in the switch statement + // above. See A.4. + + case cccAbove: + return c.copy() + default: + // Some other modifier. We're still allowed to gobble Greek + // modifiers after this. + c.copy() + } + } + } + return i == maxIgnorable +} + +// TODO: implement elUpperSpan (low-priority: complex and infrequent). + +func ltLower(c *context) bool { + // From CLDR: + // # Introduce an explicit dot above when lowercasing capital I's and J's + // # whenever there are more accents above. 
+ // # (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) + // # 0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I + // # 004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J + // # 012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK + // # 00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE + // # 00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE + // # 0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE + // ::NFD(); + // I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307; + // J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307; + // I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307; + // I \u0300 (Ì) → i \u0307 \u0300; + // I \u0301 (Í) → i \u0307 \u0301; + // I \u0303 (Ĩ) → i \u0307 \u0303; + // ::Any-Lower(); + // ::NFC(); + + i := 0 + if r := c.src[c.pSrc]; r < utf8.RuneSelf { + lower(c) + if r != 'I' && r != 'J' { + return true + } + } else { + p := norm.NFD.Properties(c.src[c.pSrc:]) + if d := p.Decomposition(); len(d) >= 3 && (d[0] == 'I' || d[0] == 'J') { + // UTF-8 optimization: the decomposition will only have an above + // modifier if the last rune of the decomposition is in [U+300-U+311]. + // In all other cases, a decomposition starting with I is always + // an I followed by modifiers that are not cased themselves. See A.2. + if d[1] == 0xCC && d[2] <= 0x91 { // A.2.4. + if !c.writeBytes(d[:1]) { + return false + } + c.dst[c.pDst-1] += 'a' - 'A' // lower + + // Assumption: modifier never changes on lowercase. See A.1. + // Assumption: all modifiers added have CCC = Above. See A.2.3. + return c.writeString("\u0307") && c.writeBytes(d[1:]) + } + // In all other cases the additional modifiers will have a CCC + // that is less than 230 (Above). We will insert the U+0307, if + // needed, after these modifiers so that a string in FCD form + // will remain so. See A.2.2. + lower(c) + i = 1 + } else { + return lower(c) + } + } + + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + return c.writeString("\u0307") && c.copy() // See A.1. + default: + c.copy() // See A.1. + } + } + return i == maxIgnorable +} + +// ltLowerSpan would be the same as isLower. + +func ltUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // Unicode: + // 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE + // + // From CLDR: + // # Remove \u0307 following soft-dotteds (i, j, and the like), with possible + // # intervening non-230 marks. + // ::NFD(); + // [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ; + // ::Any-Upper(); + // ::NFC(); + + // TODO: See A.5. A soft-dotted rune never has an exception. This would + // allow us to overload the exception bit and encode this property in + // info. Need to measure performance impact of this. + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !f(c) { + return false + } + if !unicode.Is(unicode.Soft_Dotted, r) { + return true + } + + // We don't need to do an NFD normalization, as a soft-dotted rune never + // contains U+0307. See A.3. + + i := 0 + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + if c.hasPrefix("\u0307") { + // We don't do a full NFC, but rather combine runes for + // some of the common cases. 
(Returning NFC or + // preserving normal form is neither a requirement nor + // a possibility anyway). + if !c.next() { + return false + } + if c.dst[oldPDst] == 'I' && c.pDst == oldPDst+1 && c.src[c.pSrc] == 0xcc { + s := "" + switch c.src[c.pSrc+1] { + case 0x80: // U+0300 COMBINING GRAVE ACCENT + s = "\u00cc" // U+00CC LATIN CAPITAL LETTER I WITH GRAVE + case 0x81: // U+0301 COMBINING ACUTE ACCENT + s = "\u00cd" // U+00CD LATIN CAPITAL LETTER I WITH ACUTE + case 0x83: // U+0303 COMBINING TILDE + s = "\u0128" // U+0128 LATIN CAPITAL LETTER I WITH TILDE + case 0x88: // U+0308 COMBINING DIAERESIS + s = "\u00cf" // U+00CF LATIN CAPITAL LETTER I WITH DIAERESIS + default: + } + if s != "" { + c.pDst = oldPDst + return c.writeString(s) + } + } + } + return c.copy() + default: + c.copy() + } + } + return i == maxIgnorable + } +} + +// TODO: implement ltUpperSpan (low priority: complex and infrequent). + +func aztrUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // i→İ; + if c.src[c.pSrc] == 'i' { + return c.writeString("İ") + } + return f(c) + } +} + +func aztrLower(c *context) (done bool) { + // From CLDR: + // # I and i-dotless; I-dot and i are case pairs in Turkish and Azeri + // # 0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE + // İ→i; + // # When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. + // # This matches the behavior of the canonically equivalent I-dot_above + // # 0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE + // # When lowercasing, unless an I is before a dot_above, it turns into a dotless i. + // # 0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I + // I([^[:ccc=Not_Reordered:][:ccc=Above:]]*)\u0307 → i$1 ; + // I→ı ; + // ::Any-Lower(); + if c.hasPrefix("\u0130") { // İ + return c.writeString("i") + } + if c.src[c.pSrc] != 'I' { + return lower(c) + } + + // We ignore the lower-case I for now, but insert it later when we know + // which form we need. + start := c.pSrc + c.sz + + i := 0 +Loop: + // We check for up to n ignorables before \u0307. As \u0307 is an + // ignorable as well, n is maxIgnorable-1. + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccAbove: + if c.hasPrefix("\u0307") { + return c.writeString("i") && c.writeBytes(c.src[start:c.pSrc]) // ignore U+0307 + } + done = true + break Loop + case cccZero: + c.unreadRune() + done = true + break Loop + default: + // We'll write this rune after we know which starter to use. + } + } + if i == maxIgnorable { + done = true + } + return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done +} + +// aztrLowerSpan would be the same as isLower. + +func nlTitle(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ; + if c.src[c.pSrc] != 'I' && c.src[c.pSrc] != 'i' { + return title(c) + } + + if !c.writeString("I") || !c.next() { + return false + } + if c.src[c.pSrc] == 'j' || c.src[c.pSrc] == 'J' { + return c.writeString("J") + } + c.unreadRune() + return true +} + +func nlTitleSpan(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? 
{ Ij } → IJ ; + if c.src[c.pSrc] != 'I' { + return isTitle(c) + } + if !c.next() || c.src[c.pSrc] == 'j' { + return false + } + if c.src[c.pSrc] != 'J' { + c.unreadRune() + } + return true +} + +// Not part of CLDR, but see https://unicode.org/cldr/trac/ticket/7078. +func afnlRewrite(c *context) { + if c.hasPrefix("'") || c.hasPrefix("’") { + c.isMidWord = true + } +} diff --git a/vendor/golang.org/x/text/cases/tables10.0.0.go b/vendor/golang.org/x/text/cases/tables10.0.0.go new file mode 100644 index 00000000..bd28ae14 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables10.0.0.go @@ -0,0 +1,2255 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.10 && !go1.13 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +var xorData string = "" + // Size: 185 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" + + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a\x00\x02:" + + "\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&\x00\x01*" + + "\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2068 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ιΙΙ\x166ΐ" + + "Ϊ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12φΦΦ\x12" + + "\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x12\x12вВВ\x12\x12дД" + + "Д\x12\x12оОО\x12\x12сСС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13" + + "\x1bꙋꙊꙊ\x13\x1bẖH̱H̱\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1ba" + + "ʾAʾAʾ\x13\x1bṡṠṠ\x12\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166" + + "ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ" + + "\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ" + + "\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ" + + "\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨ" + + "Ι\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15" + + "\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ" + + "\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ" + + "\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰι" + + "ᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12" + + "\x12ιΙΙ\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1c" + + "ηιῃΗΙ\x166ῒΪ̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ" + + "̀\x166ΰΫ́Ϋ́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙ" + + "ῼ\x14$ώιΏΙΏͅ\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk" + + 
"\x12\x10åå\x12\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ" + + "\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ" + + "\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFF" + + "Ff\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12" + + "stSTSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄ" + + "ԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 11892 bytes (11.61 KiB). Checksum: c6f15484b7653775. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 18: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 18 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 20 blocks, 1280 entries, 2560 bytes +// The third block is the zero block. 
+var caseValues = [1280]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 
0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x110a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x118a, + 0x19e: 0x120a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x128d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x130a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x144a, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x158a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 
0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x160a, 0x251: 0x168a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x170a, 0x256: 0x178a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x180a, 0x271: 0x188a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x190a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x0812, 0x281: 0x0812, 0x282: 0x0812, 0x283: 0x0812, 0x284: 0x0812, 0x285: 0x0812, + 0x288: 0x0813, 0x289: 0x0813, 0x28a: 0x0813, 0x28b: 0x0813, + 0x28c: 0x0813, 0x28d: 0x0813, 0x290: 0x239a, 0x291: 0x0812, + 0x292: 0x247a, 0x293: 0x0812, 0x294: 0x25ba, 0x295: 0x0812, 0x296: 0x26fa, 0x297: 0x0812, + 0x299: 0x0813, 0x29b: 0x0813, 0x29d: 0x0813, + 0x29f: 0x0813, 0x2a0: 0x0812, 0x2a1: 0x0812, 0x2a2: 0x0812, 0x2a3: 0x0812, + 0x2a4: 0x0812, 0x2a5: 0x0812, 0x2a6: 0x0812, 0x2a7: 0x0812, 0x2a8: 0x0813, 0x2a9: 0x0813, + 0x2aa: 0x0813, 0x2ab: 0x0813, 0x2ac: 0x0813, 0x2ad: 0x0813, 0x2ae: 0x0813, 0x2af: 0x0813, + 0x2b0: 0x8b52, 0x2b1: 0x8b52, 0x2b2: 0x8e52, 0x2b3: 0x8e52, 0x2b4: 0x9152, 0x2b5: 0x9152, + 0x2b6: 0x9452, 0x2b7: 0x9452, 0x2b8: 0x9752, 0x2b9: 0x9752, 0x2ba: 0x9a52, 0x2bb: 0x9a52, + 0x2bc: 0x4d52, 0x2bd: 0x4d52, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x283a, 0x2c1: 0x292a, 0x2c2: 0x2a1a, 0x2c3: 0x2b0a, 0x2c4: 0x2bfa, 0x2c5: 0x2cea, + 0x2c6: 0x2dda, 0x2c7: 0x2eca, 0x2c8: 0x2fb9, 0x2c9: 0x30a9, 0x2ca: 0x3199, 0x2cb: 0x3289, + 0x2cc: 0x3379, 0x2cd: 0x3469, 0x2ce: 0x3559, 0x2cf: 0x3649, 0x2d0: 0x373a, 0x2d1: 0x382a, + 0x2d2: 0x391a, 0x2d3: 0x3a0a, 0x2d4: 0x3afa, 0x2d5: 0x3bea, 0x2d6: 0x3cda, 0x2d7: 0x3dca, + 0x2d8: 0x3eb9, 0x2d9: 0x3fa9, 0x2da: 0x4099, 0x2db: 0x4189, 0x2dc: 0x4279, 0x2dd: 0x4369, + 0x2de: 0x4459, 0x2df: 0x4549, 0x2e0: 0x463a, 0x2e1: 0x472a, 0x2e2: 0x481a, 0x2e3: 0x490a, + 0x2e4: 0x49fa, 0x2e5: 0x4aea, 0x2e6: 0x4bda, 0x2e7: 0x4cca, 0x2e8: 0x4db9, 0x2e9: 0x4ea9, + 0x2ea: 0x4f99, 0x2eb: 0x5089, 0x2ec: 0x5179, 0x2ed: 0x5269, 0x2ee: 0x5359, 0x2ef: 0x5449, + 0x2f0: 0x0812, 0x2f1: 0x0812, 0x2f2: 0x553a, 0x2f3: 0x564a, 0x2f4: 0x571a, + 0x2f6: 0x57fa, 0x2f7: 0x58da, 0x2f8: 0x0813, 0x2f9: 0x0813, 0x2fa: 0x8b53, 0x2fb: 0x8b53, + 0x2fc: 0x5a19, 0x2fd: 0x0004, 0x2fe: 0x5aea, 0x2ff: 0x0004, + // Block 0xc, offset 0x300 + 0x300: 0x0004, 0x301: 0x0004, 0x302: 0x5b6a, 0x303: 0x5c7a, 0x304: 0x5d4a, + 0x306: 0x5e2a, 0x307: 0x5f0a, 0x308: 0x8e53, 0x309: 0x8e53, 0x30a: 0x9153, 0x30b: 0x9153, + 0x30c: 0x6049, 0x30d: 0x0004, 0x30e: 0x0004, 0x30f: 0x0004, 0x310: 0x0812, 0x311: 0x0812, + 0x312: 0x611a, 0x313: 0x625a, 0x316: 0x639a, 0x317: 0x647a, + 0x318: 0x0813, 0x319: 0x0813, 0x31a: 0x9453, 0x31b: 0x9453, 0x31d: 0x0004, + 0x31e: 0x0004, 0x31f: 0x0004, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x65ba, 0x323: 0x66fa, + 0x324: 0x683a, 0x325: 0x0912, 0x326: 0x691a, 0x327: 0x69fa, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x9a53, 0x32b: 0x9a53, 0x32c: 0x0913, 0x32d: 0x0004, 0x32e: 0x0004, 0x32f: 0x0004, + 0x332: 0x6b3a, 0x333: 0x6c4a, 0x334: 0x6d1a, + 0x336: 0x6dfa, 0x337: 0x6eda, 0x338: 0x9753, 0x339: 0x9753, 0x33a: 0x4d53, 0x33b: 0x4d53, + 0x33c: 
0x7019, 0x33d: 0x0004, 0x33e: 0x0004, + // Block 0xd, offset 0x340 + 0x342: 0x0013, + 0x347: 0x0013, 0x34a: 0x0012, 0x34b: 0x0013, + 0x34c: 0x0013, 0x34d: 0x0013, 0x34e: 0x0012, 0x34f: 0x0012, 0x350: 0x0013, 0x351: 0x0013, + 0x352: 0x0013, 0x353: 0x0012, 0x355: 0x0013, + 0x359: 0x0013, 0x35a: 0x0013, 0x35b: 0x0013, 0x35c: 0x0013, 0x35d: 0x0013, + 0x364: 0x0013, 0x366: 0x70eb, 0x368: 0x0013, + 0x36a: 0x714b, 0x36b: 0x718b, 0x36c: 0x0013, 0x36d: 0x0013, 0x36f: 0x0012, + 0x370: 0x0013, 0x371: 0x0013, 0x372: 0x9d53, 0x373: 0x0013, 0x374: 0x0012, 0x375: 0x0010, + 0x376: 0x0010, 0x377: 0x0010, 0x378: 0x0010, 0x379: 0x0012, + 0x37c: 0x0012, 0x37d: 0x0012, 0x37e: 0x0013, 0x37f: 0x0013, + // Block 0xe, offset 0x380 + 0x380: 0x1a13, 0x381: 0x1a13, 0x382: 0x1e13, 0x383: 0x1e13, 0x384: 0x1a13, 0x385: 0x1a13, + 0x386: 0x2613, 0x387: 0x2613, 0x388: 0x2a13, 0x389: 0x2a13, 0x38a: 0x2e13, 0x38b: 0x2e13, + 0x38c: 0x2a13, 0x38d: 0x2a13, 0x38e: 0x2613, 0x38f: 0x2613, 0x390: 0xa052, 0x391: 0xa052, + 0x392: 0xa352, 0x393: 0xa352, 0x394: 0xa652, 0x395: 0xa652, 0x396: 0xa352, 0x397: 0xa352, + 0x398: 0xa052, 0x399: 0xa052, 0x39a: 0x1a12, 0x39b: 0x1a12, 0x39c: 0x1e12, 0x39d: 0x1e12, + 0x39e: 0x1a12, 0x39f: 0x1a12, 0x3a0: 0x2612, 0x3a1: 0x2612, 0x3a2: 0x2a12, 0x3a3: 0x2a12, + 0x3a4: 0x2e12, 0x3a5: 0x2e12, 0x3a6: 0x2a12, 0x3a7: 0x2a12, 0x3a8: 0x2612, 0x3a9: 0x2612, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x6552, 0x3c1: 0x6552, 0x3c2: 0x6552, 0x3c3: 0x6552, 0x3c4: 0x6552, 0x3c5: 0x6552, + 0x3c6: 0x6552, 0x3c7: 0x6552, 0x3c8: 0x6552, 0x3c9: 0x6552, 0x3ca: 0x6552, 0x3cb: 0x6552, + 0x3cc: 0x6552, 0x3cd: 0x6552, 0x3ce: 0x6552, 0x3cf: 0x6552, 0x3d0: 0xa952, 0x3d1: 0xa952, + 0x3d2: 0xa952, 0x3d3: 0xa952, 0x3d4: 0xa952, 0x3d5: 0xa952, 0x3d6: 0xa952, 0x3d7: 0xa952, + 0x3d8: 0xa952, 0x3d9: 0xa952, 0x3da: 0xa952, 0x3db: 0xa952, 0x3dc: 0xa952, 0x3dd: 0xa952, + 0x3de: 0xa952, 0x3e0: 0x0113, 0x3e1: 0x0112, 0x3e2: 0x71eb, 0x3e3: 0x8853, + 0x3e4: 0x724b, 0x3e5: 0x72aa, 0x3e6: 0x730a, 0x3e7: 0x0f13, 0x3e8: 0x0f12, 0x3e9: 0x0313, + 0x3ea: 0x0312, 0x3eb: 0x0713, 0x3ec: 0x0712, 0x3ed: 0x736b, 0x3ee: 0x73cb, 0x3ef: 0x742b, + 0x3f0: 0x748b, 0x3f1: 0x0012, 0x3f2: 0x0113, 0x3f3: 0x0112, 0x3f4: 0x0012, 0x3f5: 0x0313, + 0x3f6: 0x0312, 0x3f7: 0x0012, 0x3f8: 0x0012, 0x3f9: 0x0012, 0x3fa: 0x0012, 0x3fb: 0x0012, + 0x3fc: 0x0015, 0x3fd: 0x0015, 0x3fe: 0x74eb, 0x3ff: 0x754b, + // Block 0x10, offset 0x400 + 0x400: 0x0113, 0x401: 0x0112, 0x402: 0x0113, 0x403: 0x0112, 0x404: 0x0113, 0x405: 0x0112, + 0x406: 0x0113, 0x407: 0x0112, 0x408: 0x0014, 0x409: 0x0014, 0x40a: 0x0014, 0x40b: 0x0713, + 0x40c: 0x0712, 0x40d: 0x75ab, 0x40e: 0x0012, 0x40f: 0x0010, 0x410: 0x0113, 0x411: 0x0112, + 0x412: 0x0113, 0x413: 0x0112, 0x414: 0x0012, 0x415: 0x0012, 0x416: 0x0113, 0x417: 0x0112, + 0x418: 0x0113, 0x419: 0x0112, 0x41a: 0x0113, 0x41b: 0x0112, 0x41c: 0x0113, 0x41d: 0x0112, + 0x41e: 0x0113, 0x41f: 0x0112, 0x420: 0x0113, 0x421: 0x0112, 0x422: 0x0113, 0x423: 0x0112, + 0x424: 0x0113, 0x425: 0x0112, 0x426: 0x0113, 0x427: 0x0112, 0x428: 0x0113, 0x429: 0x0112, + 0x42a: 0x760b, 0x42b: 0x766b, 0x42c: 0x76cb, 0x42d: 0x772b, 0x42e: 0x778b, + 0x430: 0x77eb, 0x431: 0x784b, 0x432: 0x78ab, 0x433: 0xac53, 0x434: 0x0113, 0x435: 0x0112, + 0x436: 0x0113, 0x437: 0x0112, + // Block 0x11, offset 0x440 + 0x440: 0x790a, 0x441: 0x798a, 0x442: 0x7a0a, 0x443: 0x7a8a, 0x444: 0x7b3a, 0x445: 0x7bea, + 0x446: 0x7c6a, + 0x453: 0x7cea, 0x454: 0x7dca, 0x455: 0x7eaa, 0x456: 0x7f8a, 0x457: 0x806a, + 0x45d: 0x0010, + 0x45e: 0x0034, 0x45f: 0x0010, 0x460: 0x0010, 0x461: 0x0010, 0x462: 0x0010, 0x463: 
0x0010, + 0x464: 0x0010, 0x465: 0x0010, 0x466: 0x0010, 0x467: 0x0010, 0x468: 0x0010, + 0x46a: 0x0010, 0x46b: 0x0010, 0x46c: 0x0010, 0x46d: 0x0010, 0x46e: 0x0010, 0x46f: 0x0010, + 0x470: 0x0010, 0x471: 0x0010, 0x472: 0x0010, 0x473: 0x0010, 0x474: 0x0010, 0x475: 0x0010, + 0x476: 0x0010, 0x478: 0x0010, 0x479: 0x0010, 0x47a: 0x0010, 0x47b: 0x0010, + 0x47c: 0x0010, 0x47e: 0x0010, + // Block 0x12, offset 0x480 + 0x480: 0x2213, 0x481: 0x2213, 0x482: 0x2613, 0x483: 0x2613, 0x484: 0x2213, 0x485: 0x2213, + 0x486: 0x2e13, 0x487: 0x2e13, 0x488: 0x2213, 0x489: 0x2213, 0x48a: 0x2613, 0x48b: 0x2613, + 0x48c: 0x2213, 0x48d: 0x2213, 0x48e: 0x3e13, 0x48f: 0x3e13, 0x490: 0x2213, 0x491: 0x2213, + 0x492: 0x2613, 0x493: 0x2613, 0x494: 0x2213, 0x495: 0x2213, 0x496: 0x2e13, 0x497: 0x2e13, + 0x498: 0x2213, 0x499: 0x2213, 0x49a: 0x2613, 0x49b: 0x2613, 0x49c: 0x2213, 0x49d: 0x2213, + 0x49e: 0xb553, 0x49f: 0xb553, 0x4a0: 0xb853, 0x4a1: 0xb853, 0x4a2: 0x2212, 0x4a3: 0x2212, + 0x4a4: 0x2612, 0x4a5: 0x2612, 0x4a6: 0x2212, 0x4a7: 0x2212, 0x4a8: 0x2e12, 0x4a9: 0x2e12, + 0x4aa: 0x2212, 0x4ab: 0x2212, 0x4ac: 0x2612, 0x4ad: 0x2612, 0x4ae: 0x2212, 0x4af: 0x2212, + 0x4b0: 0x3e12, 0x4b1: 0x3e12, 0x4b2: 0x2212, 0x4b3: 0x2212, 0x4b4: 0x2612, 0x4b5: 0x2612, + 0x4b6: 0x2212, 0x4b7: 0x2212, 0x4b8: 0x2e12, 0x4b9: 0x2e12, 0x4ba: 0x2212, 0x4bb: 0x2212, + 0x4bc: 0x2612, 0x4bd: 0x2612, 0x4be: 0x2212, 0x4bf: 0x2212, + // Block 0x13, offset 0x4c0 + 0x4c2: 0x0010, + 0x4c7: 0x0010, 0x4c9: 0x0010, 0x4cb: 0x0010, + 0x4cd: 0x0010, 0x4ce: 0x0010, 0x4cf: 0x0010, 0x4d1: 0x0010, + 0x4d2: 0x0010, 0x4d4: 0x0010, 0x4d7: 0x0010, + 0x4d9: 0x0010, 0x4db: 0x0010, 0x4dd: 0x0010, + 0x4df: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, + 0x4e4: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, 0x4e9: 0x0010, + 0x4ea: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f7: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x12, 0xc3: 0x13, 0xc4: 0x14, 0xc5: 0x15, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x16, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x17, 0xcc: 0x18, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x19, 0xd1: 0x1a, 0xd2: 0x1b, 0xd3: 0x1c, 0xd4: 0x1d, 0xd5: 0x1e, 0xd6: 0x1f, 0xd7: 0x20, + 0xd8: 0x21, 0xd9: 0x22, 0xda: 0x23, 0xdb: 0x24, 0xdc: 0x25, 0xdd: 0x26, 0xde: 0x27, 0xdf: 0x28, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x29, 0x121: 0x2a, 0x122: 0x2b, 0x123: 0x2c, 0x124: 0x2d, 0x125: 0x2e, 0x126: 0x2f, 0x127: 0x30, + 0x128: 0x31, 0x129: 0x32, 0x12a: 0x33, 0x12b: 0x34, 0x12c: 0x35, 0x12d: 0x36, 0x12e: 0x37, 0x12f: 0x38, + 0x130: 0x39, 0x131: 0x3a, 0x132: 0x3b, 0x133: 0x3c, 0x134: 0x3d, 0x135: 0x3e, 0x136: 0x3f, 0x137: 0x40, + 0x138: 0x41, 0x139: 0x42, 0x13a: 0x43, 0x13b: 0x44, 0x13c: 0x45, 0x13d: 0x46, 0x13e: 0x47, 0x13f: 0x48, + // Block 0x5, offset 0x140 + 0x140: 0x49, 0x141: 0x4a, 0x142: 0x4b, 0x143: 0x4c, 0x144: 0x23, 0x145: 0x23, 0x146: 0x23, 0x147: 0x23, + 0x148: 0x23, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x23, 0x152: 0x23, 0x153: 0x23, 0x154: 0x23, 0x155: 0x23, 0x156: 0x23, 0x157: 0x23, + 0x158: 0x23, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x08, 0x17e: 0x09, 0x17f: 0x0a, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0b, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0c, + 0x1b0: 0x7c, 0x1b1: 0x0d, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x23, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x23, 0x202: 0x23, 0x203: 0x23, 0x204: 0x23, 0x205: 0x23, 0x206: 0x23, 0x207: 0x23, + 0x208: 0x23, 0x209: 0x23, 0x20a: 0x23, 0x20b: 0x23, 0x20c: 0x23, 0x20d: 0x23, 0x20e: 0x23, 0x20f: 0x23, + 0x210: 0x23, 0x211: 0x23, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x23, 0x215: 0x23, 0x216: 0x23, 0x217: 0x23, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x0e, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x23, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x23, 0x231: 0x23, 0x232: 0x23, 0x233: 0x23, 0x234: 0x23, 0x235: 0x23, 0x236: 0x23, 0x237: 0x23, + 0x238: 0x23, 0x239: 0x23, 0x23a: 0x23, 0x23b: 0x23, 0x23c: 0x23, 0x23d: 0x23, 0x23e: 0x23, 0x23f: 0x23, + // Block 0x9, offset 0x240 + 0x240: 0x23, 0x241: 0x23, 0x242: 0x23, 0x243: 0x23, 0x244: 0x23, 0x245: 0x23, 0x246: 0x23, 0x247: 0x23, + 0x248: 0x23, 0x249: 0x23, 0x24a: 0x23, 0x24b: 0x23, 0x24c: 0x23, 0x24d: 0x23, 0x24e: 0x23, 0x24f: 0x23, + 0x250: 0x23, 0x251: 0x23, 0x252: 0x23, 0x253: 0x23, 0x254: 0x23, 0x255: 0x23, 0x256: 0x23, 0x257: 
0x23, + 0x258: 0x23, 0x259: 0x23, 0x25a: 0x23, 0x25b: 0x23, 0x25c: 0x23, 0x25d: 0x23, 0x25e: 0x23, 0x25f: 0x23, + 0x260: 0x23, 0x261: 0x23, 0x262: 0x23, 0x263: 0x23, 0x264: 0x23, 0x265: 0x23, 0x266: 0x23, 0x267: 0x23, + 0x268: 0x23, 0x269: 0x23, 0x26a: 0x23, 0x26b: 0x23, 0x26c: 0x23, 0x26d: 0x23, 0x26e: 0x23, 0x26f: 0x23, + 0x270: 0x23, 0x271: 0x23, 0x272: 0x23, 0x273: 0x23, 0x274: 0x23, 0x275: 0x23, 0x276: 0x23, 0x277: 0x23, + 0x278: 0x23, 0x279: 0x23, 0x27a: 0x23, 0x27b: 0x23, 0x27c: 0x23, 0x27d: 0x23, 0x27e: 0x23, 0x27f: 0x23, + // Block 0xa, offset 0x280 + 0x280: 0x23, 0x281: 0x23, 0x282: 0x23, 0x283: 0x23, 0x284: 0x23, 0x285: 0x23, 0x286: 0x23, 0x287: 0x23, + 0x288: 0x23, 0x289: 0x23, 0x28a: 0x23, 0x28b: 0x23, 0x28c: 0x23, 0x28d: 0x23, 0x28e: 0x23, 0x28f: 0x23, + 0x290: 0x23, 0x291: 0x23, 0x292: 0x23, 0x293: 0x23, 0x294: 0x23, 0x295: 0x23, 0x296: 0x23, 0x297: 0x23, + 0x298: 0x23, 0x299: 0x23, 0x29a: 0x23, 0x29b: 0x23, 0x29c: 0x23, 0x29d: 0x23, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x0f, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x23, 0x2f1: 0x23, 0x2f2: 0x23, 0x2f3: 0x23, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x23, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x23, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x23, 0x319: 0x23, 0x31a: 0x23, 0x31b: 0x23, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x23, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, + // Block 0xd, offset 0x340 + 0x340: 0xd3, 0x341: 0xd4, 0x342: 0xd5, 0x343: 0xd6, 0x344: 0xd7, 0x345: 0xd8, 0x346: 0xd9, 0x347: 0xda, + 0x348: 0xdb, 0x34a: 0xdc, 0x34b: 0xdd, 0x34c: 0xde, 0x34d: 0xdf, + 0x350: 0xe0, 0x351: 0xe1, 0x352: 0xe2, 0x353: 0xe3, 0x356: 0xe4, 0x357: 0xe5, + 0x358: 0xe6, 0x359: 0xe7, 0x35a: 0xe8, 0x35b: 0xe9, 0x35c: 0xea, + 0x362: 0xeb, 0x363: 0xec, + 0x368: 0xed, 0x369: 0xee, 0x36a: 0xef, 0x36b: 0xf0, + 0x370: 0xf1, 0x371: 0xf2, 0x372: 0xf3, 0x374: 0xf4, 0x375: 0xf5, + // Block 0xe, offset 0x380 + 0x380: 0x23, 0x381: 0x23, 0x382: 0x23, 0x383: 0x23, 0x384: 0x23, 0x385: 0x23, 0x386: 0x23, 0x387: 0x23, + 0x388: 0x23, 0x389: 0x23, 0x38a: 0x23, 0x38b: 0x23, 0x38c: 0x23, 0x38d: 0x23, 0x38e: 0xf6, + 0x390: 0x23, 0x391: 0xf7, 0x392: 0x23, 0x393: 0x23, 0x394: 0x23, 0x395: 0xf8, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x23, 0x3c1: 0x23, 0x3c2: 0x23, 0x3c3: 0x23, 0x3c4: 0x23, 0x3c5: 0x23, 0x3c6: 0x23, 0x3c7: 0x23, + 0x3c8: 0x23, 0x3c9: 0x23, 0x3ca: 0x23, 0x3cb: 0x23, 0x3cc: 0x23, 0x3cd: 0x23, 0x3ce: 0x23, 0x3cf: 0x23, + 0x3d0: 0xf7, + // Block 0x10, offset 0x400 + 0x410: 0x23, 0x411: 0x23, 0x412: 0x23, 0x413: 0x23, 0x414: 0x23, 0x415: 0x23, 0x416: 0x23, 0x417: 0x23, + 0x418: 0x23, 0x419: 0xf9, + // Block 0x11, offset 0x440 + 0x460: 0x23, 0x461: 0x23, 0x462: 0x23, 0x463: 0x23, 0x464: 0x23, 0x465: 0x23, 0x466: 0x23, 0x467: 0x23, + 0x468: 0xf0, 0x469: 0xfa, 0x46b: 0xfb, 0x46c: 0xfc, 0x46d: 0xfd, 0x46e: 0xfe, + 0x47c: 0x23, 0x47d: 0xff, 0x47e: 0x100, 0x47f: 0x101, + // Block 0x12, offset 0x480 + 0x4b0: 0x23, 0x4b1: 0x102, 0x4b2: 0x103, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x104, 0x4c6: 0x105, + 0x4c9: 0x106, + 0x4d0: 0x107, 0x4d1: 0x108, 0x4d2: 0x109, 
0x4d3: 0x10a, 0x4d4: 0x10b, 0x4d5: 0x10c, 0x4d6: 0x10d, 0x4d7: 0x10e, + 0x4d8: 0x10f, 0x4d9: 0x110, 0x4da: 0x111, 0x4db: 0x112, 0x4dc: 0x113, 0x4dd: 0x114, 0x4de: 0x115, 0x4df: 0x116, + 0x4e8: 0x117, 0x4e9: 0x118, 0x4ea: 0x119, + // Block 0x14, offset 0x500 + 0x500: 0x11a, + 0x520: 0x23, 0x521: 0x23, 0x522: 0x23, 0x523: 0x11b, 0x524: 0x10, 0x525: 0x11c, + 0x538: 0x11d, 0x539: 0x11, 0x53a: 0x11e, + // Block 0x15, offset 0x540 + 0x544: 0x11f, 0x545: 0x120, 0x546: 0x121, + 0x54f: 0x122, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x123, 0x5c1: 0x124, 0x5c4: 0x124, 0x5c5: 0x124, 0x5c6: 0x124, 0x5c7: 0x125, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 277 entries, 554 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x35, 0x38, 0x3c, 0x3f, 0x43, 0x4d, 0x4f, 0x54, 0x64, 0x6b, 0x70, 0x7e, 0x7f, 0x8d, 0x9c, 0xa6, 0xa9, 0xaf, 0xb7, 0xba, 0xbc, 0xca, 0xd0, 0xde, 0xe9, 0xf5, 0x100, 0x10c, 0x116, 0x122, 0x12d, 0x139, 0x145, 0x14d, 0x155, 0x15f, 0x16a, 0x176, 0x17d, 0x188, 0x18d, 0x195, 0x198, 0x19d, 0x1a1, 0x1a5, 0x1ac, 0x1b5, 0x1bd, 0x1be, 0x1c7, 0x1ce, 0x1d6, 0x1dc, 0x1e2, 0x1e7, 0x1eb, 0x1ee, 0x1f0, 0x1f3, 0x1f8, 0x1f9, 0x1fb, 0x1fd, 0x1ff, 0x206, 0x20b, 0x20f, 0x218, 0x21b, 0x21e, 0x224, 0x225, 0x230, 0x231, 0x232, 0x237, 0x244, 0x24c, 0x254, 0x25d, 0x266, 0x26f, 0x274, 0x277, 0x280, 0x28d, 0x28f, 0x296, 0x298, 0x2a4, 0x2a5, 0x2b0, 0x2b8, 0x2c0, 0x2c6, 0x2c7, 0x2d5, 0x2da, 0x2dd, 0x2e2, 0x2e6, 0x2ec, 0x2f1, 0x2f4, 0x2f9, 0x2fe, 0x2ff, 0x305, 0x307, 0x308, 0x30a, 0x30c, 0x30f, 0x310, 0x312, 0x315, 0x31b, 0x31f, 0x321, 0x326, 0x32d, 0x331, 0x33a, 0x33b, 0x343, 0x347, 0x34c, 0x354, 0x35a, 0x360, 0x36a, 0x36f, 0x378, 0x37e, 0x385, 0x389, 0x391, 0x393, 0x395, 0x398, 0x39a, 0x39c, 0x39d, 0x39e, 0x3a0, 0x3a2, 0x3a8, 0x3ad, 0x3af, 0x3b5, 0x3b8, 0x3ba, 0x3c0, 0x3c5, 0x3c7, 0x3c8, 0x3c9, 0x3ca, 0x3cc, 0x3ce, 0x3d0, 0x3d3, 0x3d5, 0x3d8, 0x3e0, 0x3e3, 0x3e7, 0x3ef, 0x3f1, 0x3f2, 0x3f3, 0x3f5, 0x3fb, 0x3fd, 0x3fe, 0x400, 0x402, 0x404, 0x411, 0x412, 0x413, 0x417, 0x419, 0x41a, 0x41b, 0x41c, 0x41d, 0x421, 0x425, 0x42b, 0x42d, 0x434, 0x437, 0x43b, 0x441, 0x44a, 0x450, 0x456, 0x460, 0x46a, 0x46c, 0x473, 0x479, 0x47f, 0x485, 0x488, 0x48e, 0x491, 0x499, 0x49a, 0x4a1, 0x4a2, 0x4a5, 0x4af, 0x4b5, 0x4bb, 0x4bc, 0x4c2, 0x4c5, 0x4cd, 0x4d4, 0x4db, 0x4dc, 0x4dd, 0x4de, 0x4df, 0x4e1, 0x4e3, 0x4e5, 0x4e9, 0x4ea, 0x4ec, 0x4ed, 0x4ee, 0x4f0, 0x4f5, 0x4fa, 0x4fe, 0x4ff, 0x502, 0x506, 0x511, 0x515, 0x51d, 0x522, 0x526, 0x529, 0x52d, 0x530, 0x533, 0x538, 0x53c, 0x540, 0x544, 0x548, 0x54a, 0x54c, 0x54f, 0x554, 0x556, 0x55b, 0x564, 0x569, 0x56a, 0x56d, 0x56e, 0x56f, 0x571, 0x572, 0x573} + +// sparseValues: 1395 entries, 5580 bytes +var sparseValues = [1395]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 
0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xbf}, + // Block 0x6, offset 0x35 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x38 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3c + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3f + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x43 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4d + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4f + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x54 + {value: 0x6852, lo: 0x80, hi: 0x86}, + {value: 0x198a, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0024, lo: 0x92, hi: 0x95}, + {value: 0x0034, lo: 0x96, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x99}, + {value: 0x0034, lo: 0x9a, hi: 0x9b}, + {value: 0x0024, lo: 0x9c, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa7}, + {value: 0x0024, lo: 0xa8, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, 
lo: 0xb0, hi: 0xbd}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x64 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xf, offset 0x6b + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x10, offset 0x70 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x11, offset 0x7e + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x12, offset 0x7f + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8d + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x14, offset 0x9c + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x15, offset 0xa6 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x16, offset 0xa9 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + // Block 0x17, offset 0xaf + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + 
{value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x18, offset 0xb7 + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x19, offset 0xba + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x1a, offset 0xbc + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1b, offset 0xca + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1c, offset 0xd0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1d, offset 0xde + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0xe9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x1f, offset 0xf5 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x20, offset 0x100 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 0x0010, 
lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x21, offset 0x10c + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x22, offset 0x116 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x23, offset 0x122 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x12d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x25, offset 0x139 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x26, offset 0x145 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x27, offset 0x14d + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x28, offset 0x155 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x29, 
offset 0x15f + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x2a, offset 0x16a + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2b, offset 0x176 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2c, offset 0x17d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2d, offset 0x188 + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2e, offset 0x18d + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2f, offset 0x195 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x30, offset 0x198 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x31, offset 0x19d + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x32, offset 0x1a1 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x33, offset 0x1a5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x34, offset 0x1ac + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 
0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x1b5 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x36, offset 0x1bd + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x37, offset 0x1be + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x38, offset 0x1c7 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x39, offset 0x1ce + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d6 + {value: 0x7053, lo: 0x80, hi: 0x85}, + {value: 0x7053, lo: 0x87, hi: 0x87}, + {value: 0x7053, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x3b, offset 0x1dc + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x1e2 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3d, offset 0x1e7 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3e, offset 0x1eb + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3f, offset 0x1ee + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x40, offset 0x1f0 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x41, offset 0x1f3 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x42, offset 0x1f8 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x43, offset 0x1f9 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x44, offset 0x1fb + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x45, offset 0x1fd + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 
0x46, offset 0x1ff + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x47, offset 0x206 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x48, offset 0x20b + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x49, offset 0x20f + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x4a, offset 0x218 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x4b, offset 0x21b + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb7}, + // Block 0x4c, offset 0x21e + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x224 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4e, offset 0x225 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4f, offset 0x230 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x50, offset 0x231 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x51, offset 0x232 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x52, offset 0x237 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x53, offset 0x244 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x54, offset 0x24c + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 
0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x254 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x56, offset 0x25d + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x57, offset 0x266 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x58, offset 0x26f + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x59, offset 0x274 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x5a, offset 0x277 + {value: 0x1a6a, lo: 0x80, hi: 0x80}, + {value: 0x1aea, lo: 0x81, hi: 0x81}, + {value: 0x1b6a, lo: 0x82, hi: 0x82}, + {value: 0x1bea, lo: 0x83, hi: 0x83}, + {value: 0x1c6a, lo: 0x84, hi: 0x84}, + {value: 0x1cea, lo: 0x85, hi: 0x85}, + {value: 0x1d6a, lo: 0x86, hi: 0x86}, + {value: 0x1dea, lo: 0x87, hi: 0x87}, + {value: 0x1e6a, lo: 0x88, hi: 0x88}, + // Block 0x5b, offset 0x280 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5c, offset 0x28d + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5d, offset 0x28f + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8452, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8852, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x296 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5f, offset 0x298 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, 
lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x60, offset 0x2a4 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x2a5 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x1f1a, lo: 0x96, hi: 0x96}, + {value: 0x1fca, lo: 0x97, hi: 0x97}, + {value: 0x207a, lo: 0x98, hi: 0x98}, + {value: 0x212a, lo: 0x99, hi: 0x99}, + {value: 0x21da, lo: 0x9a, hi: 0x9a}, + {value: 0x228a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x233b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x62, offset 0x2b0 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x63, offset 0x2b8 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2c0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x2c6 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x66, offset 0x2c7 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x67, offset 0x2d5 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0x9d52, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x68, offset 0x2da + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x69, offset 0x2dd + {value: 0xa053, lo: 0xb6, hi: 0xb7}, + {value: 0xa353, lo: 0xb8, hi: 0xb9}, + {value: 0xa653, lo: 0xba, hi: 0xbb}, + {value: 0xa353, lo: 0xbc, hi: 0xbd}, + {value: 0xa053, lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x2e2 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xa953, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e6 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6c, offset 0x2ec + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6d, offset 0x2f1 + {value: 0x0010, lo: 0x80, hi: 
0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x2f4 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6f, offset 0x2f9 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x70, offset 0x2fe + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x71, offset 0x2ff + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x72, offset 0x305 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x73, offset 0x307 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x74, offset 0x308 + {value: 0x0010, lo: 0x85, hi: 0xae}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x75, offset 0x30a + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x76, offset 0x30c + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x77, offset 0x30f + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x78, offset 0x310 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x79, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x7a, offset 0x315 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x31b + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7c, offset 0x31f + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7d, offset 0x321 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7e, offset 0x326 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8453, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7f, offset 0x32d + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x80, offset 0x331 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x81, offset 0x33a + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x82, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 
0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x83, offset 0x343 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x347 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x85, offset 0x34c + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x86, offset 0x354 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x87, offset 0x35a + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x88, offset 0x360 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x89, offset 0x36a + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x36f + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8b, offset 0x378 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37e + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xac52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x385 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x389 + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, 
hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8f, offset 0x391 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x393 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x91, offset 0x395 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x92, offset 0x398 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x93, offset 0x39a + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x94, offset 0x39c + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x95, offset 0x39d + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x96, offset 0x39e + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x97, offset 0x3a0 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x98, offset 0x3a2 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x99, offset 0x3a8 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x3ad + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3af + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x3b5 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9d, offset 0x3b8 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9e, offset 0x3ba + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9f, offset 0x3c0 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x3c5 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa1, offset 0x3c7 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa2, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa3, offset 0x3c9 + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa4, offset 0x3ca + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x3cc + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa6, offset 0x3ce + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa7, offset 0x3d0 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa8, offset 0x3d3 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa9, offset 0x3d5 + 
{value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xaa, offset 0x3d8 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xaf53, lo: 0x98, hi: 0x9f}, + {value: 0xb253, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e0 + {value: 0xaf52, lo: 0x80, hi: 0x87}, + {value: 0xb252, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xac, offset 0x3e3 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb253, lo: 0xb0, hi: 0xb7}, + {value: 0xaf53, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x3e7 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb252, lo: 0x98, hi: 0x9f}, + {value: 0xaf52, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xae, offset 0x3ef + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xaf, offset 0x3f1 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xb0, offset 0x3f2 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb1, offset 0x3f3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb2, offset 0x3f5 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x3fb + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb4, offset 0x3fd + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb5, offset 0x3fe + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb6, offset 0x400 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb7, offset 0x402 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb8, offset 0x404 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb3}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x411 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xba, offset 0x412 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xbb, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbc, offset 0x417 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbd, offset 0x419 + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbe, offset 0x41a + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbf, offset 0x41b + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x41c + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc1, offset 0x41d + {value: 0x0010, lo: 0x80, 
hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc2, offset 0x421 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x425 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc4, offset 0x42b + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc5, offset 0x42d + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc6, offset 0x434 + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc7, offset 0x437 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43b + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xc9, offset 0x441 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xca, offset 0x44a + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcb, offset 0x450 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcc, offset 0x456 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcd, offset 0x460 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xce, offset 0x46a + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xcf, offset 0x46c + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + 
{value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd0, offset 0x473 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x479 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd2, offset 0x47f + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x485 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd4, offset 0x488 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x48e + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd6, offset 0x491 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd7, offset 0x499 + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd8, offset 0x49a + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd9, offset 0x4a1 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xda, offset 0x4a2 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdb, offset 0x4a5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xdc, offset 0x4af + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xdd, offset 0x4b5 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x86, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + // Block 0xde, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xdf, offset 0x4bc + {value: 0x0010, lo: 0x80, hi: 
0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe0, offset 0x4c2 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x4c5 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xe2, offset 0x4cd + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x4d4 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xe4, offset 0x4db + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe5, offset 0x4dc + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xe6, offset 0x4dd + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xe7, offset 0x4de + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xe8, offset 0x4df + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe9, offset 0x4e1 + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xea, offset 0x4e3 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xeb, offset 0x4e5 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xec, offset 0x4e9 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xed, offset 0x4ea + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xee, offset 0x4ec + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xef, offset 0x4ed + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + // Block 0xf0, offset 0x4ee + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xf1, offset 0x4f0 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xf2, offset 0x4f5 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xf3, offset 0x4fa + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xf4, offset 0x4fe + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xf5, offset 0x4ff + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xf6, offset 0x502 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 
0xf7, offset 0x506 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xf8, offset 0x511 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xf9, offset 0x515 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xfa, offset 0x51d + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x522 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0xfc, offset 0x526 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0xfd, offset 0x529 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0xfe, offset 0x52d + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0xff, offset 0x530 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x100, offset 0x533 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x101, offset 0x538 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x102, offset 0x53c + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x103, offset 0x540 + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x104, offset 0x544 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x105, offset 0x548 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x106, offset 0x54a + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x107, offset 0x54c + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x108, offset 0x54f + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 
0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x109, offset 0x554 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x10a, offset 0x556 + {value: 0xb552, lo: 0x80, hi: 0x81}, + {value: 0xb852, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x10b, offset 0x55b + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x10c, offset 0x564 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x10d, offset 0x569 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10e, offset 0x56a + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10f, offset 0x56d + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x110, offset 0x56e + {value: 0x0004, lo: 0xbb, hi: 0xbf}, + // Block 0x111, offset 0x56f + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x112, offset 0x571 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x113, offset 0x572 + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 14177 bytes (13KiB); checksum: F17D40E8 diff --git a/vendor/golang.org/x/text/cases/tables11.0.0.go b/vendor/golang.org/x/text/cases/tables11.0.0.go new file mode 100644 index 00000000..ce00ce37 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables11.0.0.go @@ -0,0 +1,2316 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.13 && !go1.14 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +var xorData string = "" + // Size: 188 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" 
+ + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x001\x00\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a" + + "\x00\x02:\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&" + + "\x00\x01*\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00" + + "\x01\x22" + +var exceptions string = "" + // Size: 2436 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ιΙΙ\x166ΐ" + + "Ϊ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12φΦΦ\x12" + + "\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x10\x1bᲐა\x10\x1bᲑბ" + + "\x10\x1bᲒგ\x10\x1bᲓდ\x10\x1bᲔე\x10\x1bᲕვ\x10\x1bᲖზ\x10\x1bᲗთ\x10\x1bᲘი" + + "\x10\x1bᲙკ\x10\x1bᲚლ\x10\x1bᲛმ\x10\x1bᲜნ\x10\x1bᲝო\x10\x1bᲞპ\x10\x1bᲟჟ" + + "\x10\x1bᲠრ\x10\x1bᲡს\x10\x1bᲢტ\x10\x1bᲣუ\x10\x1bᲤფ\x10\x1bᲥქ\x10\x1bᲦღ" + + "\x10\x1bᲧყ\x10\x1bᲨშ\x10\x1bᲩჩ\x10\x1bᲪც\x10\x1bᲫძ\x10\x1bᲬწ\x10\x1bᲭჭ" + + "\x10\x1bᲮხ\x10\x1bᲯჯ\x10\x1bᲰჰ\x10\x1bᲱჱ\x10\x1bᲲჲ\x10\x1bᲳჳ\x10\x1bᲴჴ" + + "\x10\x1bᲵჵ\x10\x1bᲶჶ\x10\x1bᲷჷ\x10\x1bᲸჸ\x10\x1bᲹჹ\x10\x1bᲺჺ\x10\x1bᲽჽ" + + "\x10\x1bᲾჾ\x10\x1bᲿჿ\x12\x12вВВ\x12\x12дДД\x12\x12оОО\x12\x12сСС\x12\x12" + + "тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13\x1bꙋꙊꙊ\x13\x1bẖH̱H̱\x13\x1bẗ" + + "T̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1baʾAʾAʾ\x13\x1bṡṠṠ\x12\x10ssß\x14" + + "$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ" + + "\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈ" + + "Ι\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15" + + "\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ" + + "\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨΙ\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ" + + "\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠι" + + "ὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧι" + + "ὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ" + + "\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰιᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ" + + "\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x12ιΙΙ\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ" + + "\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ\x166ῒΪ̀Ϊ̀\x166ΐΙ" + + "̈́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ̀\x166ΰΫ́Ϋ́\x14$ῤΡ̓Ρ̓" + + "\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ\x14$ῶΩ͂Ω͂\x16" + + "6ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12\x10ɫɫ\x12\x10ɽ" + + "ɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ" + + "\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ" + + "\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFFFf\x12\x12fiFIFi\x12\x12flFLFl" + + "\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12stSTSt\x12\x12stSTSt\x14$մնՄՆՄ" + + "ն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. 
len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 12250 bytes (11.96 KiB). Checksum: 53ff6cb7321675e1. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 20: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 20 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 22 blocks, 1408 entries, 2816 bytes +// The third block is the zero block. +var caseValues = [1408]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 
0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x110a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x118a, + 0x19e: 0x120a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x128d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 
0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x130a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x144a, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x158a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x160a, 0x251: 0x168a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x170a, 0x256: 0x178a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x180a, 0x271: 0x188a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x190a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x6852, 0x281: 0x6852, 0x282: 0x6852, 0x283: 0x6852, 0x284: 0x6852, 0x285: 0x6852, + 0x286: 0x6852, 0x287: 0x198a, 0x288: 0x0012, + 0x291: 0x0034, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0034, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0034, 0x29b: 0x0034, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, 0x2a0: 0x0024, 0x2a1: 0x0024, 0x2a2: 0x0034, 0x2a3: 0x0034, + 0x2a4: 0x0034, 0x2a5: 0x0034, 0x2a6: 0x0034, 0x2a7: 0x0034, 0x2a8: 0x0024, 0x2a9: 0x0024, + 0x2aa: 0x0034, 0x2ab: 0x0024, 0x2ac: 0x0024, 0x2ad: 0x0034, 0x2ae: 0x0034, 0x2af: 0x0024, + 0x2b0: 0x0034, 0x2b1: 0x0034, 0x2b2: 0x0034, 0x2b3: 0x0034, 0x2b4: 0x0034, 0x2b5: 0x0034, + 0x2b6: 0x0034, 0x2b7: 0x0034, 0x2b8: 0x0034, 0x2b9: 0x0034, 0x2ba: 0x0034, 0x2bb: 0x0034, + 0x2bc: 0x0034, 0x2bd: 0x0034, 0x2bf: 0x0034, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x7053, 0x2c1: 0x7053, 0x2c2: 0x7053, 0x2c3: 0x7053, 0x2c4: 0x7053, 0x2c5: 0x7053, + 0x2c7: 0x7053, + 0x2cd: 0x7053, 0x2d0: 0x1a6a, 0x2d1: 0x1aea, + 0x2d2: 0x1b6a, 0x2d3: 0x1bea, 0x2d4: 0x1c6a, 0x2d5: 0x1cea, 0x2d6: 0x1d6a, 0x2d7: 0x1dea, + 0x2d8: 0x1e6a, 0x2d9: 0x1eea, 0x2da: 0x1f6a, 0x2db: 0x1fea, 0x2dc: 0x206a, 0x2dd: 0x20ea, + 0x2de: 0x216a, 0x2df: 0x21ea, 0x2e0: 0x226a, 0x2e1: 0x22ea, 0x2e2: 0x236a, 
0x2e3: 0x23ea, + 0x2e4: 0x246a, 0x2e5: 0x24ea, 0x2e6: 0x256a, 0x2e7: 0x25ea, 0x2e8: 0x266a, 0x2e9: 0x26ea, + 0x2ea: 0x276a, 0x2eb: 0x27ea, 0x2ec: 0x286a, 0x2ed: 0x28ea, 0x2ee: 0x296a, 0x2ef: 0x29ea, + 0x2f0: 0x2a6a, 0x2f1: 0x2aea, 0x2f2: 0x2b6a, 0x2f3: 0x2bea, 0x2f4: 0x2c6a, 0x2f5: 0x2cea, + 0x2f6: 0x2d6a, 0x2f7: 0x2dea, 0x2f8: 0x2e6a, 0x2f9: 0x2eea, 0x2fa: 0x2f6a, + 0x2fc: 0x0014, 0x2fd: 0x2fea, 0x2fe: 0x306a, 0x2ff: 0x30ea, + // Block 0xc, offset 0x300 + 0x300: 0x0812, 0x301: 0x0812, 0x302: 0x0812, 0x303: 0x0812, 0x304: 0x0812, 0x305: 0x0812, + 0x308: 0x0813, 0x309: 0x0813, 0x30a: 0x0813, 0x30b: 0x0813, + 0x30c: 0x0813, 0x30d: 0x0813, 0x310: 0x3a9a, 0x311: 0x0812, + 0x312: 0x3b7a, 0x313: 0x0812, 0x314: 0x3cba, 0x315: 0x0812, 0x316: 0x3dfa, 0x317: 0x0812, + 0x319: 0x0813, 0x31b: 0x0813, 0x31d: 0x0813, + 0x31f: 0x0813, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x0812, 0x323: 0x0812, + 0x324: 0x0812, 0x325: 0x0812, 0x326: 0x0812, 0x327: 0x0812, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x0813, 0x32b: 0x0813, 0x32c: 0x0813, 0x32d: 0x0813, 0x32e: 0x0813, 0x32f: 0x0813, + 0x330: 0x8e52, 0x331: 0x8e52, 0x332: 0x9152, 0x333: 0x9152, 0x334: 0x9452, 0x335: 0x9452, + 0x336: 0x9752, 0x337: 0x9752, 0x338: 0x9a52, 0x339: 0x9a52, 0x33a: 0x9d52, 0x33b: 0x9d52, + 0x33c: 0x4d52, 0x33d: 0x4d52, + // Block 0xd, offset 0x340 + 0x340: 0x3f3a, 0x341: 0x402a, 0x342: 0x411a, 0x343: 0x420a, 0x344: 0x42fa, 0x345: 0x43ea, + 0x346: 0x44da, 0x347: 0x45ca, 0x348: 0x46b9, 0x349: 0x47a9, 0x34a: 0x4899, 0x34b: 0x4989, + 0x34c: 0x4a79, 0x34d: 0x4b69, 0x34e: 0x4c59, 0x34f: 0x4d49, 0x350: 0x4e3a, 0x351: 0x4f2a, + 0x352: 0x501a, 0x353: 0x510a, 0x354: 0x51fa, 0x355: 0x52ea, 0x356: 0x53da, 0x357: 0x54ca, + 0x358: 0x55b9, 0x359: 0x56a9, 0x35a: 0x5799, 0x35b: 0x5889, 0x35c: 0x5979, 0x35d: 0x5a69, + 0x35e: 0x5b59, 0x35f: 0x5c49, 0x360: 0x5d3a, 0x361: 0x5e2a, 0x362: 0x5f1a, 0x363: 0x600a, + 0x364: 0x60fa, 0x365: 0x61ea, 0x366: 0x62da, 0x367: 0x63ca, 0x368: 0x64b9, 0x369: 0x65a9, + 0x36a: 0x6699, 0x36b: 0x6789, 0x36c: 0x6879, 0x36d: 0x6969, 0x36e: 0x6a59, 0x36f: 0x6b49, + 0x370: 0x0812, 0x371: 0x0812, 0x372: 0x6c3a, 0x373: 0x6d4a, 0x374: 0x6e1a, + 0x376: 0x6efa, 0x377: 0x6fda, 0x378: 0x0813, 0x379: 0x0813, 0x37a: 0x8e53, 0x37b: 0x8e53, + 0x37c: 0x7119, 0x37d: 0x0004, 0x37e: 0x71ea, 0x37f: 0x0004, + // Block 0xe, offset 0x380 + 0x380: 0x0004, 0x381: 0x0004, 0x382: 0x726a, 0x383: 0x737a, 0x384: 0x744a, + 0x386: 0x752a, 0x387: 0x760a, 0x388: 0x9153, 0x389: 0x9153, 0x38a: 0x9453, 0x38b: 0x9453, + 0x38c: 0x7749, 0x38d: 0x0004, 0x38e: 0x0004, 0x38f: 0x0004, 0x390: 0x0812, 0x391: 0x0812, + 0x392: 0x781a, 0x393: 0x795a, 0x396: 0x7a9a, 0x397: 0x7b7a, + 0x398: 0x0813, 0x399: 0x0813, 0x39a: 0x9753, 0x39b: 0x9753, 0x39d: 0x0004, + 0x39e: 0x0004, 0x39f: 0x0004, 0x3a0: 0x0812, 0x3a1: 0x0812, 0x3a2: 0x7cba, 0x3a3: 0x7dfa, + 0x3a4: 0x7f3a, 0x3a5: 0x0912, 0x3a6: 0x801a, 0x3a7: 0x80fa, 0x3a8: 0x0813, 0x3a9: 0x0813, + 0x3aa: 0x9d53, 0x3ab: 0x9d53, 0x3ac: 0x0913, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, + 0x3b2: 0x823a, 0x3b3: 0x834a, 0x3b4: 0x841a, + 0x3b6: 0x84fa, 0x3b7: 0x85da, 0x3b8: 0x9a53, 0x3b9: 0x9a53, 0x3ba: 0x4d53, 0x3bb: 0x4d53, + 0x3bc: 0x8719, 0x3bd: 0x0004, 0x3be: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3c2: 0x0013, + 0x3c7: 0x0013, 0x3ca: 0x0012, 0x3cb: 0x0013, + 0x3cc: 0x0013, 0x3cd: 0x0013, 0x3ce: 0x0012, 0x3cf: 0x0012, 0x3d0: 0x0013, 0x3d1: 0x0013, + 0x3d2: 0x0013, 0x3d3: 0x0012, 0x3d5: 0x0013, + 0x3d9: 0x0013, 0x3da: 0x0013, 0x3db: 0x0013, 0x3dc: 0x0013, 0x3dd: 0x0013, + 0x3e4: 0x0013, 0x3e6: 0x87eb, 0x3e8: 0x0013, + 0x3ea: 0x884b, 
0x3eb: 0x888b, 0x3ec: 0x0013, 0x3ed: 0x0013, 0x3ef: 0x0012, + 0x3f0: 0x0013, 0x3f1: 0x0013, 0x3f2: 0xa053, 0x3f3: 0x0013, 0x3f4: 0x0012, 0x3f5: 0x0010, + 0x3f6: 0x0010, 0x3f7: 0x0010, 0x3f8: 0x0010, 0x3f9: 0x0012, + 0x3fc: 0x0012, 0x3fd: 0x0012, 0x3fe: 0x0013, 0x3ff: 0x0013, + // Block 0x10, offset 0x400 + 0x400: 0x1a13, 0x401: 0x1a13, 0x402: 0x1e13, 0x403: 0x1e13, 0x404: 0x1a13, 0x405: 0x1a13, + 0x406: 0x2613, 0x407: 0x2613, 0x408: 0x2a13, 0x409: 0x2a13, 0x40a: 0x2e13, 0x40b: 0x2e13, + 0x40c: 0x2a13, 0x40d: 0x2a13, 0x40e: 0x2613, 0x40f: 0x2613, 0x410: 0xa352, 0x411: 0xa352, + 0x412: 0xa652, 0x413: 0xa652, 0x414: 0xa952, 0x415: 0xa952, 0x416: 0xa652, 0x417: 0xa652, + 0x418: 0xa352, 0x419: 0xa352, 0x41a: 0x1a12, 0x41b: 0x1a12, 0x41c: 0x1e12, 0x41d: 0x1e12, + 0x41e: 0x1a12, 0x41f: 0x1a12, 0x420: 0x2612, 0x421: 0x2612, 0x422: 0x2a12, 0x423: 0x2a12, + 0x424: 0x2e12, 0x425: 0x2e12, 0x426: 0x2a12, 0x427: 0x2a12, 0x428: 0x2612, 0x429: 0x2612, + // Block 0x11, offset 0x440 + 0x440: 0x6552, 0x441: 0x6552, 0x442: 0x6552, 0x443: 0x6552, 0x444: 0x6552, 0x445: 0x6552, + 0x446: 0x6552, 0x447: 0x6552, 0x448: 0x6552, 0x449: 0x6552, 0x44a: 0x6552, 0x44b: 0x6552, + 0x44c: 0x6552, 0x44d: 0x6552, 0x44e: 0x6552, 0x44f: 0x6552, 0x450: 0xac52, 0x451: 0xac52, + 0x452: 0xac52, 0x453: 0xac52, 0x454: 0xac52, 0x455: 0xac52, 0x456: 0xac52, 0x457: 0xac52, + 0x458: 0xac52, 0x459: 0xac52, 0x45a: 0xac52, 0x45b: 0xac52, 0x45c: 0xac52, 0x45d: 0xac52, + 0x45e: 0xac52, 0x460: 0x0113, 0x461: 0x0112, 0x462: 0x88eb, 0x463: 0x8b53, + 0x464: 0x894b, 0x465: 0x89aa, 0x466: 0x8a0a, 0x467: 0x0f13, 0x468: 0x0f12, 0x469: 0x0313, + 0x46a: 0x0312, 0x46b: 0x0713, 0x46c: 0x0712, 0x46d: 0x8a6b, 0x46e: 0x8acb, 0x46f: 0x8b2b, + 0x470: 0x8b8b, 0x471: 0x0012, 0x472: 0x0113, 0x473: 0x0112, 0x474: 0x0012, 0x475: 0x0313, + 0x476: 0x0312, 0x477: 0x0012, 0x478: 0x0012, 0x479: 0x0012, 0x47a: 0x0012, 0x47b: 0x0012, + 0x47c: 0x0015, 0x47d: 0x0015, 0x47e: 0x8beb, 0x47f: 0x8c4b, + // Block 0x12, offset 0x480 + 0x480: 0x0113, 0x481: 0x0112, 0x482: 0x0113, 0x483: 0x0112, 0x484: 0x0113, 0x485: 0x0112, + 0x486: 0x0113, 0x487: 0x0112, 0x488: 0x0014, 0x489: 0x0014, 0x48a: 0x0014, 0x48b: 0x0713, + 0x48c: 0x0712, 0x48d: 0x8cab, 0x48e: 0x0012, 0x48f: 0x0010, 0x490: 0x0113, 0x491: 0x0112, + 0x492: 0x0113, 0x493: 0x0112, 0x494: 0x0012, 0x495: 0x0012, 0x496: 0x0113, 0x497: 0x0112, + 0x498: 0x0113, 0x499: 0x0112, 0x49a: 0x0113, 0x49b: 0x0112, 0x49c: 0x0113, 0x49d: 0x0112, + 0x49e: 0x0113, 0x49f: 0x0112, 0x4a0: 0x0113, 0x4a1: 0x0112, 0x4a2: 0x0113, 0x4a3: 0x0112, + 0x4a4: 0x0113, 0x4a5: 0x0112, 0x4a6: 0x0113, 0x4a7: 0x0112, 0x4a8: 0x0113, 0x4a9: 0x0112, + 0x4aa: 0x8d0b, 0x4ab: 0x8d6b, 0x4ac: 0x8dcb, 0x4ad: 0x8e2b, 0x4ae: 0x8e8b, 0x4af: 0x0012, + 0x4b0: 0x8eeb, 0x4b1: 0x8f4b, 0x4b2: 0x8fab, 0x4b3: 0xaf53, 0x4b4: 0x0113, 0x4b5: 0x0112, + 0x4b6: 0x0113, 0x4b7: 0x0112, 0x4b8: 0x0113, 0x4b9: 0x0112, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x900a, 0x4c1: 0x908a, 0x4c2: 0x910a, 0x4c3: 0x918a, 0x4c4: 0x923a, 0x4c5: 0x92ea, + 0x4c6: 0x936a, + 0x4d3: 0x93ea, 0x4d4: 0x94ca, 0x4d5: 0x95aa, 0x4d6: 0x968a, 0x4d7: 0x976a, + 0x4dd: 0x0010, + 0x4de: 0x0034, 0x4df: 0x0010, 0x4e0: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, 0x4e3: 0x0010, + 0x4e4: 0x0010, 0x4e5: 0x0010, 0x4e6: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, + 0x4ea: 0x0010, 0x4eb: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f3: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f8: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 
0x0010, + // Block 0x14, offset 0x500 + 0x500: 0x2213, 0x501: 0x2213, 0x502: 0x2613, 0x503: 0x2613, 0x504: 0x2213, 0x505: 0x2213, + 0x506: 0x2e13, 0x507: 0x2e13, 0x508: 0x2213, 0x509: 0x2213, 0x50a: 0x2613, 0x50b: 0x2613, + 0x50c: 0x2213, 0x50d: 0x2213, 0x50e: 0x3e13, 0x50f: 0x3e13, 0x510: 0x2213, 0x511: 0x2213, + 0x512: 0x2613, 0x513: 0x2613, 0x514: 0x2213, 0x515: 0x2213, 0x516: 0x2e13, 0x517: 0x2e13, + 0x518: 0x2213, 0x519: 0x2213, 0x51a: 0x2613, 0x51b: 0x2613, 0x51c: 0x2213, 0x51d: 0x2213, + 0x51e: 0xb853, 0x51f: 0xb853, 0x520: 0xbb53, 0x521: 0xbb53, 0x522: 0x2212, 0x523: 0x2212, + 0x524: 0x2612, 0x525: 0x2612, 0x526: 0x2212, 0x527: 0x2212, 0x528: 0x2e12, 0x529: 0x2e12, + 0x52a: 0x2212, 0x52b: 0x2212, 0x52c: 0x2612, 0x52d: 0x2612, 0x52e: 0x2212, 0x52f: 0x2212, + 0x530: 0x3e12, 0x531: 0x3e12, 0x532: 0x2212, 0x533: 0x2212, 0x534: 0x2612, 0x535: 0x2612, + 0x536: 0x2212, 0x537: 0x2212, 0x538: 0x2e12, 0x539: 0x2e12, 0x53a: 0x2212, 0x53b: 0x2212, + 0x53c: 0x2612, 0x53d: 0x2612, 0x53e: 0x2212, 0x53f: 0x2212, + // Block 0x15, offset 0x540 + 0x542: 0x0010, + 0x547: 0x0010, 0x549: 0x0010, 0x54b: 0x0010, + 0x54d: 0x0010, 0x54e: 0x0010, 0x54f: 0x0010, 0x551: 0x0010, + 0x552: 0x0010, 0x554: 0x0010, 0x557: 0x0010, + 0x559: 0x0010, 0x55b: 0x0010, 0x55d: 0x0010, + 0x55f: 0x0010, 0x561: 0x0010, 0x562: 0x0010, + 0x564: 0x0010, 0x567: 0x0010, 0x568: 0x0010, 0x569: 0x0010, + 0x56a: 0x0010, 0x56c: 0x0010, 0x56d: 0x0010, 0x56e: 0x0010, 0x56f: 0x0010, + 0x570: 0x0010, 0x571: 0x0010, 0x572: 0x0010, 0x574: 0x0010, 0x575: 0x0010, + 0x576: 0x0010, 0x577: 0x0010, 0x579: 0x0010, 0x57a: 0x0010, 0x57b: 0x0010, + 0x57c: 0x0010, 0x57e: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. +var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x14, 0xc3: 0x15, 0xc4: 0x16, 0xc5: 0x17, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x18, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x19, 0xcc: 0x1a, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x1b, 0xd1: 0x1c, 0xd2: 0x1d, 0xd3: 0x1e, 0xd4: 0x1f, 0xd5: 0x20, 0xd6: 0x08, 0xd7: 0x21, + 0xd8: 0x22, 0xd9: 0x23, 0xda: 0x24, 0xdb: 0x25, 0xdc: 0x26, 0xdd: 0x27, 0xde: 0x28, 0xdf: 0x29, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x2a, 0x121: 0x2b, 0x122: 0x2c, 0x123: 0x2d, 0x124: 0x2e, 0x125: 0x2f, 0x126: 0x30, 0x127: 0x31, + 0x128: 0x32, 0x129: 0x33, 0x12a: 0x34, 0x12b: 0x35, 0x12c: 0x36, 0x12d: 0x37, 0x12e: 0x38, 0x12f: 0x39, + 0x130: 0x3a, 0x131: 0x3b, 0x132: 0x3c, 0x133: 0x3d, 0x134: 0x3e, 0x135: 0x3f, 0x136: 0x40, 0x137: 0x41, + 0x138: 0x42, 0x139: 0x43, 0x13a: 0x44, 0x13b: 0x45, 0x13c: 0x46, 0x13d: 0x47, 0x13e: 0x48, 0x13f: 0x49, + // Block 0x5, offset 0x140 + 0x140: 0x4a, 0x141: 0x4b, 0x142: 0x4c, 0x143: 0x09, 0x144: 0x24, 0x145: 0x24, 0x146: 0x24, 0x147: 0x24, + 0x148: 0x24, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x24, 0x152: 0x24, 0x153: 0x24, 0x154: 0x24, 0x155: 0x24, 0x156: 0x24, 0x157: 0x24, + 0x158: 0x24, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 
0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x0a, 0x17e: 0x0b, 0x17f: 0x0c, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0d, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0e, + 0x1b0: 0x7c, 0x1b1: 0x0f, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x24, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x24, 0x202: 0x24, 0x203: 0x24, 0x204: 0x24, 0x205: 0x24, 0x206: 0x24, 0x207: 0x24, + 0x208: 0x24, 0x209: 0x24, 0x20a: 0x24, 0x20b: 0x24, 0x20c: 0x24, 0x20d: 0x24, 0x20e: 0x24, 0x20f: 0x24, + 0x210: 0x24, 0x211: 0x24, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x24, 0x215: 0x24, 0x216: 0x24, 0x217: 0x24, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x10, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x24, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x24, 0x231: 0x24, 0x232: 0x24, 0x233: 0x24, 0x234: 0x24, 0x235: 0x24, 0x236: 0x24, 0x237: 0x24, + 0x238: 0x24, 0x239: 0x24, 0x23a: 0x24, 0x23b: 0x24, 0x23c: 0x24, 0x23d: 0x24, 0x23e: 0x24, 0x23f: 0x24, + // Block 0x9, offset 0x240 + 0x240: 0x24, 0x241: 0x24, 0x242: 0x24, 0x243: 0x24, 0x244: 0x24, 0x245: 0x24, 0x246: 0x24, 0x247: 0x24, + 0x248: 0x24, 0x249: 0x24, 0x24a: 0x24, 0x24b: 0x24, 0x24c: 0x24, 0x24d: 0x24, 0x24e: 0x24, 0x24f: 0x24, + 0x250: 0x24, 0x251: 0x24, 0x252: 0x24, 0x253: 0x24, 0x254: 0x24, 0x255: 0x24, 0x256: 0x24, 0x257: 0x24, + 0x258: 0x24, 0x259: 0x24, 0x25a: 0x24, 0x25b: 0x24, 0x25c: 0x24, 0x25d: 0x24, 0x25e: 0x24, 0x25f: 0x24, + 0x260: 0x24, 0x261: 0x24, 0x262: 0x24, 0x263: 0x24, 0x264: 0x24, 0x265: 0x24, 0x266: 0x24, 0x267: 0x24, + 0x268: 0x24, 0x269: 0x24, 0x26a: 0x24, 0x26b: 0x24, 0x26c: 0x24, 0x26d: 0x24, 0x26e: 0x24, 0x26f: 0x24, + 0x270: 0x24, 0x271: 0x24, 0x272: 0x24, 0x273: 0x24, 0x274: 0x24, 0x275: 0x24, 0x276: 0x24, 0x277: 0x24, + 0x278: 0x24, 0x279: 0x24, 0x27a: 0x24, 0x27b: 0x24, 0x27c: 0x24, 0x27d: 0x24, 0x27e: 0x24, 0x27f: 0x24, + // Block 0xa, offset 0x280 + 0x280: 0x24, 0x281: 0x24, 0x282: 0x24, 0x283: 0x24, 0x284: 0x24, 0x285: 0x24, 0x286: 0x24, 0x287: 0x24, + 0x288: 0x24, 0x289: 0x24, 0x28a: 0x24, 0x28b: 0x24, 0x28c: 0x24, 0x28d: 0x24, 0x28e: 0x24, 0x28f: 0x24, + 0x290: 0x24, 0x291: 0x24, 0x292: 0x24, 0x293: 0x24, 0x294: 0x24, 0x295: 0x24, 0x296: 0x24, 0x297: 0x24, + 0x298: 0x24, 0x299: 0x24, 0x29a: 0x24, 0x29b: 0x24, 0x29c: 0x24, 0x29d: 0x24, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x11, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x24, 0x2f1: 0x24, 0x2f2: 0x24, 0x2f3: 0x24, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x24, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x24, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x24, 0x319: 0x24, 0x31a: 0x24, 0x31b: 0x24, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x24, 
0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, 0x334: 0xd3, + 0x33c: 0xd4, 0x33d: 0xd5, + // Block 0xd, offset 0x340 + 0x340: 0xd6, 0x341: 0xd7, 0x342: 0xd8, 0x343: 0xd9, 0x344: 0xda, 0x345: 0xdb, 0x346: 0xdc, 0x347: 0xdd, + 0x348: 0xde, 0x34a: 0xdf, 0x34b: 0xe0, 0x34c: 0xe1, 0x34d: 0xe2, + 0x350: 0xe3, 0x351: 0xe4, 0x352: 0xe5, 0x353: 0xe6, 0x356: 0xe7, 0x357: 0xe8, + 0x358: 0xe9, 0x359: 0xea, 0x35a: 0xeb, 0x35b: 0xec, 0x35c: 0xed, + 0x360: 0xee, 0x362: 0xef, 0x363: 0xf0, + 0x368: 0xf1, 0x369: 0xf2, 0x36a: 0xf3, 0x36b: 0xf4, + 0x370: 0xf5, 0x371: 0xf6, 0x372: 0xf7, 0x374: 0xf8, 0x375: 0xf9, 0x376: 0xfa, + 0x37b: 0xfb, + // Block 0xe, offset 0x380 + 0x380: 0x24, 0x381: 0x24, 0x382: 0x24, 0x383: 0x24, 0x384: 0x24, 0x385: 0x24, 0x386: 0x24, 0x387: 0x24, + 0x388: 0x24, 0x389: 0x24, 0x38a: 0x24, 0x38b: 0x24, 0x38c: 0x24, 0x38d: 0x24, 0x38e: 0xfc, + 0x390: 0x24, 0x391: 0xfd, 0x392: 0x24, 0x393: 0x24, 0x394: 0x24, 0x395: 0xfe, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x24, 0x3c1: 0x24, 0x3c2: 0x24, 0x3c3: 0x24, 0x3c4: 0x24, 0x3c5: 0x24, 0x3c6: 0x24, 0x3c7: 0x24, + 0x3c8: 0x24, 0x3c9: 0x24, 0x3ca: 0x24, 0x3cb: 0x24, 0x3cc: 0x24, 0x3cd: 0x24, 0x3ce: 0x24, 0x3cf: 0x24, + 0x3d0: 0xfd, + // Block 0x10, offset 0x400 + 0x410: 0x24, 0x411: 0x24, 0x412: 0x24, 0x413: 0x24, 0x414: 0x24, 0x415: 0x24, 0x416: 0x24, 0x417: 0x24, + 0x418: 0x24, 0x419: 0xff, + // Block 0x11, offset 0x440 + 0x460: 0x24, 0x461: 0x24, 0x462: 0x24, 0x463: 0x24, 0x464: 0x24, 0x465: 0x24, 0x466: 0x24, 0x467: 0x24, + 0x468: 0xf4, 0x469: 0x100, 0x46b: 0x101, 0x46c: 0x102, 0x46d: 0x103, 0x46e: 0x104, + 0x479: 0x105, 0x47c: 0x24, 0x47d: 0x106, 0x47e: 0x107, 0x47f: 0x108, + // Block 0x12, offset 0x480 + 0x4b0: 0x24, 0x4b1: 0x109, 0x4b2: 0x10a, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x10b, 0x4c6: 0x10c, + 0x4c9: 0x10d, + 0x4d0: 0x10e, 0x4d1: 0x10f, 0x4d2: 0x110, 0x4d3: 0x111, 0x4d4: 0x112, 0x4d5: 0x113, 0x4d6: 0x114, 0x4d7: 0x115, + 0x4d8: 0x116, 0x4d9: 0x117, 0x4da: 0x118, 0x4db: 0x119, 0x4dc: 0x11a, 0x4dd: 0x11b, 0x4de: 0x11c, 0x4df: 0x11d, + 0x4e8: 0x11e, 0x4e9: 0x11f, 0x4ea: 0x120, + // Block 0x14, offset 0x500 + 0x500: 0x121, + 0x520: 0x24, 0x521: 0x24, 0x522: 0x24, 0x523: 0x122, 0x524: 0x12, 0x525: 0x123, + 0x538: 0x124, 0x539: 0x13, 0x53a: 0x125, + // Block 0x15, offset 0x540 + 0x544: 0x126, 0x545: 0x127, 0x546: 0x128, + 0x54f: 0x129, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x12a, 0x5c1: 0x12b, 0x5c4: 0x12b, 0x5c5: 0x12b, 0x5c6: 0x12b, 0x5c7: 0x12c, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 282 entries, 564 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x35, 0x38, 0x3c, 0x3f, 0x43, 0x4d, 0x4f, 0x57, 0x5e, 0x63, 0x71, 0x72, 0x80, 0x8f, 0x99, 0x9c, 0xa3, 0xab, 0xae, 0xb0, 0xbf, 0xc5, 0xd3, 0xde, 0xeb, 0xf6, 0x102, 0x10c, 0x118, 0x123, 0x12f, 0x13b, 0x143, 0x14c, 0x156, 0x161, 0x16d, 0x174, 0x17f, 0x184, 0x18c, 0x18f, 0x194, 0x198, 0x19c, 0x1a3, 0x1ac, 0x1b4, 0x1b5, 0x1be, 0x1c5, 0x1cd, 0x1d3, 0x1d8, 0x1dc, 0x1df, 0x1e1, 0x1e4, 0x1e9, 0x1ea, 0x1ec, 0x1ee, 0x1f0, 0x1f7, 0x1fc, 0x200, 0x209, 0x20c, 0x20f, 0x215, 0x216, 0x221, 0x222, 0x223, 0x228, 0x235, 0x23d, 0x245, 0x24e, 0x257, 0x260, 0x265, 0x268, 0x273, 0x280, 0x282, 0x289, 0x28b, 0x297, 0x298, 0x2a3, 0x2ab, 0x2b3, 0x2b9, 0x2ba, 0x2c8, 0x2cd, 0x2d0, 0x2d5, 0x2d9, 0x2df, 0x2e4, 0x2e7, 0x2ec, 0x2f1, 0x2f2, 0x2f8, 0x2fa, 0x2fb, 0x2fd, 0x2ff, 0x302, 0x303, 0x305, 0x308, 0x30e, 0x312, 
0x314, 0x319, 0x320, 0x324, 0x32d, 0x32e, 0x337, 0x33b, 0x340, 0x348, 0x34e, 0x354, 0x35e, 0x363, 0x36c, 0x372, 0x379, 0x37d, 0x385, 0x387, 0x389, 0x38c, 0x38e, 0x390, 0x391, 0x392, 0x394, 0x396, 0x39c, 0x3a1, 0x3a3, 0x3a9, 0x3ac, 0x3ae, 0x3b4, 0x3b9, 0x3bb, 0x3bc, 0x3bd, 0x3be, 0x3c0, 0x3c2, 0x3c4, 0x3c7, 0x3c9, 0x3cc, 0x3d4, 0x3d7, 0x3db, 0x3e3, 0x3e5, 0x3e6, 0x3e7, 0x3e9, 0x3ef, 0x3f1, 0x3f2, 0x3f4, 0x3f6, 0x3f8, 0x405, 0x406, 0x407, 0x40b, 0x40d, 0x40e, 0x40f, 0x410, 0x411, 0x414, 0x417, 0x41d, 0x421, 0x425, 0x42b, 0x42e, 0x435, 0x439, 0x43d, 0x444, 0x44d, 0x453, 0x459, 0x463, 0x46d, 0x46f, 0x477, 0x47d, 0x483, 0x489, 0x48c, 0x492, 0x495, 0x49d, 0x49e, 0x4a5, 0x4a9, 0x4aa, 0x4ad, 0x4b5, 0x4bb, 0x4c2, 0x4c3, 0x4c9, 0x4cc, 0x4d4, 0x4db, 0x4e5, 0x4ed, 0x4f0, 0x4f1, 0x4f2, 0x4f3, 0x4f4, 0x4f6, 0x4f8, 0x4fa, 0x4fe, 0x4ff, 0x501, 0x503, 0x504, 0x505, 0x507, 0x50c, 0x511, 0x515, 0x516, 0x519, 0x51d, 0x528, 0x52c, 0x534, 0x539, 0x53d, 0x540, 0x544, 0x547, 0x54a, 0x54f, 0x553, 0x557, 0x55b, 0x55f, 0x561, 0x563, 0x566, 0x56b, 0x56d, 0x572, 0x57b, 0x580, 0x581, 0x584, 0x585, 0x586, 0x588, 0x589, 0x58a} + +// sparseValues: 1418 entries, 5672 bytes +var sparseValues = [1418]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xbf}, + // Block 0x6, offset 0x35 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, 
lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x38 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3c + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3f + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x43 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4d + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4f + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9b, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0xa0, hi: 0xa0}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x57 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xaf, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xe, offset 0x5e + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xf, offset 0x63 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x10, offset 0x71 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x11, offset 0x72 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x12, offset 0x80 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 
0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x14, offset 0x99 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x15, offset 0x9c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0x16, offset 0xa3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x17, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x18, offset 0xae + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x19, offset 0xb0 + {value: 0x0034, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1a, offset 0xbf + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1b, offset 0xc5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1c, offset 0xd3 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, 
lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0xde + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xeb + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x1f, offset 0xf6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x20, offset 0x102 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x21, offset 0x10c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x22, offset 0x118 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x23, offset 0x123 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x24, offset 0x12f + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, 
lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x26, offset 0x143 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x27, offset 0x14c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x28, offset 0x156 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x29, offset 0x161 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2a, offset 0x16d + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2b, offset 0x174 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2c, offset 0x17f + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2d, offset 0x184 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 
0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2e, offset 0x18c + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x2f, offset 0x18f + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x30, offset 0x194 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x31, offset 0x198 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x32, offset 0x19c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x33, offset 0x1a3 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x34, offset 0x1ac + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x35, offset 0x1b4 + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x36, offset 0x1b5 + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x37, offset 0x1be + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x38, offset 0x1c5 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x39, offset 0x1cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, 
hi: 0xbe}, + // Block 0x3b, offset 0x1d8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3c, offset 0x1dc + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3d, offset 0x1df + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x3e, offset 0x1e1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x3f, offset 0x1e4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x40, offset 0x1e9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x41, offset 0x1ea + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x42, offset 0x1ec + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x43, offset 0x1ee + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x44, offset 0x1f0 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x45, offset 0x1f7 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x46, offset 0x1fc + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x47, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x48, offset 0x209 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x20c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb8}, + // Block 0x4a, offset 0x20f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4b, offset 0x215 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4c, offset 0x216 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x221 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x4e, offset 0x222 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x4f, 
offset 0x223 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x50, offset 0x228 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x51, offset 0x235 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x52, offset 0x23d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x54, offset 0x24e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x55, offset 0x257 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x56, offset 0x260 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x57, offset 0x265 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x58, offset 0x268 + {value: 0x316a, lo: 0x80, hi: 0x80}, + {value: 0x31ea, lo: 0x81, hi: 0x81}, + {value: 0x326a, lo: 0x82, hi: 0x82}, + {value: 0x32ea, lo: 0x83, hi: 0x83}, + {value: 0x336a, lo: 0x84, hi: 0x84}, + {value: 0x33ea, lo: 0x85, hi: 0x85}, + {value: 0x346a, lo: 0x86, hi: 0x86}, + {value: 0x34ea, lo: 0x87, hi: 0x87}, + {value: 0x356a, lo: 0x88, hi: 0x88}, + {value: 0x8353, lo: 0x90, hi: 0xba}, + {value: 0x8353, lo: 0xbd, hi: 0xbf}, + // Block 0x59, offset 0x273 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + 
{value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5a, offset 0x280 + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5b, offset 0x282 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8752, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8b52, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5c, offset 0x289 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5d, offset 0x28b + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x5e, offset 0x297 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x298 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x361a, lo: 0x96, hi: 0x96}, + {value: 0x36ca, lo: 0x97, hi: 0x97}, + {value: 0x377a, lo: 0x98, hi: 0x98}, + {value: 0x382a, lo: 0x99, hi: 0x99}, + {value: 0x38da, lo: 0x9a, hi: 0x9a}, + {value: 0x398a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x3a3b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x60, offset 0x2a3 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x61, offset 0x2ab + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x2b3 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x2b9 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x64, offset 0x2ba + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x65, offset 
0x2c8 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0xa052, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x2cd + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x67, offset 0x2d0 + {value: 0xa353, lo: 0xb6, hi: 0xb7}, + {value: 0xa653, lo: 0xb8, hi: 0xb9}, + {value: 0xa953, lo: 0xba, hi: 0xbb}, + {value: 0xa653, lo: 0xbc, hi: 0xbd}, + {value: 0xa353, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x2d5 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xac53, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x69, offset 0x2d9 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6a, offset 0x2df + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e4 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6c, offset 0x2e7 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6d, offset 0x2ec + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x6e, offset 0x2f1 + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x6f, offset 0x2f2 + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x70, offset 0x2f8 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x71, offset 0x2fa + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x72, offset 0x2fb + {value: 0x0010, lo: 0x85, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x73, offset 0x2fd + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x74, offset 0x2ff + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x75, offset 0x302 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x76, offset 0x303 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x77, offset 0x305 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x78, offset 0x308 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x79, offset 0x30e + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7a, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 
0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7b, offset 0x314 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7c, offset 0x319 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8753, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7d, offset 0x320 + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x7e, offset 0x324 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x7f, offset 0x32d + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x80, offset 0x32e + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x337 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x82, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x83, offset 0x340 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x84, offset 0x348 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x85, offset 0x34e + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x86, offset 0x354 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x87, offset 0x35e + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x363 + 
{value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x89, offset 0x36c + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8a, offset 0x372 + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xaf52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8b, offset 0x379 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37d + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8d, offset 0x385 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x387 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x8f, offset 0x389 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x90, offset 0x38c + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x91, offset 0x38e + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x92, offset 0x390 + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x93, offset 0x391 + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x94, offset 0x392 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x95, offset 0x394 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x96, offset 0x396 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x97, offset 0x39c + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x98, offset 0x3a1 + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x3a3 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x3a9 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9b, offset 0x3ac + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9c, offset 0x3ae + {value: 0x0010, lo: 0x82, hi: 
0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9d, offset 0x3b4 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x3b9 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0x9f, offset 0x3bb + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa0, offset 0x3bc + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa1, offset 0x3bd + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa2, offset 0x3be + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x3c0 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa4, offset 0x3c2 + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa5, offset 0x3c4 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa6, offset 0x3c7 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa7, offset 0x3c9 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xa8, offset 0x3cc + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xb253, lo: 0x98, hi: 0x9f}, + {value: 0xb553, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xa9, offset 0x3d4 + {value: 0xb252, lo: 0x80, hi: 0x87}, + {value: 0xb552, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xaa, offset 0x3d7 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb553, lo: 0xb0, hi: 0xb7}, + {value: 0xb253, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3db + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb552, lo: 0x98, hi: 0x9f}, + {value: 0xb252, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xac, offset 0x3e3 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xad, offset 0x3e5 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xae, offset 0x3e6 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xaf, offset 0x3e7 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb0, offset 0x3e9 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x3ef + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb2, offset 0x3f1 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb3, offset 0x3f2 + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb4, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb5, offset 0x3f6 + {value: 0x0010, lo: 0x80, 
hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb6, offset 0x3f8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb5}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x405 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xb8, offset 0x406 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xb9, offset 0x407 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xba, offset 0x40b + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbb, offset 0x40d + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbc, offset 0x40e + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbd, offset 0x40f + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xbe, offset 0x410 + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xbf, offset 0x411 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc0, offset 0x414 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc1, offset 0x417 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x87}, + {value: 0x0024, lo: 0x88, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x8b}, + {value: 0x0024, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + // Block 0xc2, offset 0x41d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc3, offset 0x421 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x425 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc5, offset 0x42b + {value: 0x0014, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc6, offset 0x42e + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc7, offset 0x435 + {value: 0x0010, lo: 0x84, hi: 0x86}, + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc8, offset 0x439 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x43d + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x89, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, 
lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xca, offset 0x444 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xcb, offset 0x44d + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcc, offset 0x453 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcd, offset 0x459 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xce, offset 0x463 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xcf, offset 0x46d + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xd0, offset 0x46f + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0x9e, hi: 0x9e}, + // Block 0xd1, offset 0x477 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd2, offset 0x47d + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd3, offset 0x483 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd4, offset 0x489 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd5, offset 0x48c + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x492 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 
0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd7, offset 0x495 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd8, offset 0x49d + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd9, offset 0x49e + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xda, offset 0x4a5 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + // Block 0xdb, offset 0x4a9 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xdc, offset 0x4aa + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdd, offset 0x4ad + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xde, offset 0x4b5 + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xdf, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x86, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + // Block 0xe0, offset 0x4c2 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xe1, offset 0x4c3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe2, offset 0x4c9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xe3, offset 0x4cc + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xe4, offset 0x4d4 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xe5, offset 0x4db + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa5}, + 
{value: 0x0010, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xe6, offset 0x4e5 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0x96}, + {value: 0x0034, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe7, offset 0x4ed + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + // Block 0xe8, offset 0x4f0 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe9, offset 0x4f1 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xea, offset 0x4f2 + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xeb, offset 0x4f3 + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xec, offset 0x4f4 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xed, offset 0x4f6 + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xee, offset 0x4f8 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xef, offset 0x4fa + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xf0, offset 0x4fe + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xf1, offset 0x4ff + {value: 0x2013, lo: 0x80, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xbf}, + // Block 0xf2, offset 0x501 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xf3, offset 0x503 + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xf4, offset 0x504 + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + // Block 0xf5, offset 0x505 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xf6, offset 0x507 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xf7, offset 0x50c + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xf8, offset 0x511 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xf9, offset 0x515 + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xfa, offset 0x516 + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xfb, offset 0x519 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xfc, offset 0x51d + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xfd, offset 0x528 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, 
hi: 0xbf}, + // Block 0xfe, offset 0x52c + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xff, offset 0x534 + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0x100, offset 0x539 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0x101, offset 0x53d + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0x102, offset 0x540 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x544 + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x104, offset 0x547 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x105, offset 0x54a + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x106, offset 0x54f + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x107, offset 0x553 + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x108, offset 0x557 + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x109, offset 0x55b + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x10a, offset 0x55f + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x10b, offset 0x561 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x10c, offset 0x563 + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x10d, offset 0x566 + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x10e, offset 0x56b + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x10f, offset 0x56d + {value: 0xb852, lo: 0x80, hi: 0x81}, + {value: 0xbb52, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x110, offset 0x572 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 
0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x111, offset 0x57b + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x112, offset 0x580 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x113, offset 0x581 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x114, offset 0x584 + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x115, offset 0x585 + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x116, offset 0x586 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x117, offset 0x588 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x118, offset 0x589 + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 14906 bytes (14KiB); checksum: 362795C7 diff --git a/vendor/golang.org/x/text/cases/tables12.0.0.go b/vendor/golang.org/x/text/cases/tables12.0.0.go new file mode 100644 index 00000000..84d841b1 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables12.0.0.go @@ -0,0 +1,2359 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.14 && !go1.16 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +var xorData string = "" + // Size: 192 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" + + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x001\x00\x00\x0b(\x04\x00\x03\x04\x1e\x00\x0b)\x08" + + "\x00\x03\x0a\x00\x02:\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<" + + "\x00\x01&\x00\x01*\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01" + + "\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2450 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꟅꟅ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ι" + + "ΙΙ\x166ΐΪ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12" + + "φΦΦ\x12\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x10\x1bᲐა" + + "\x10\x1bᲑბ\x10\x1bᲒგ\x10\x1bᲓდ\x10\x1bᲔე\x10\x1bᲕვ\x10\x1bᲖზ\x10\x1bᲗთ" + + "\x10\x1bᲘი\x10\x1bᲙკ\x10\x1bᲚლ\x10\x1bᲛმ\x10\x1bᲜნ\x10\x1bᲝო\x10\x1bᲞპ" + + "\x10\x1bᲟჟ\x10\x1bᲠრ\x10\x1bᲡს\x10\x1bᲢტ\x10\x1bᲣუ\x10\x1bᲤფ\x10\x1bᲥქ" + + "\x10\x1bᲦღ\x10\x1bᲧყ\x10\x1bᲨშ\x10\x1bᲩჩ\x10\x1bᲪც\x10\x1bᲫძ\x10\x1bᲬწ" + + "\x10\x1bᲭჭ\x10\x1bᲮხ\x10\x1bᲯჯ\x10\x1bᲰჰ\x10\x1bᲱჱ\x10\x1bᲲჲ\x10\x1bᲳჳ" + + "\x10\x1bᲴჴ\x10\x1bᲵჵ\x10\x1bᲶჶ\x10\x1bᲷჷ\x10\x1bᲸჸ\x10\x1bᲹჹ\x10\x1bᲺჺ" + + "\x10\x1bᲽჽ\x10\x1bᲾჾ\x10\x1bᲿჿ\x12\x12вВВ\x12\x12дДД\x12\x12оОО\x12\x12с" + + 
"СС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13\x1bꙋꙊꙊ\x13\x1bẖH̱H̱" + + "\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1baʾAʾAʾ\x13\x1bṡṠṠ\x12" + + "\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ" + + "\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ" + + "\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ\x15\x1dἄιᾄἌΙ\x15" + + "\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ\x15+ἢιἪΙᾚ\x15+ἣι" + + "ἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨΙ\x15\x1dἡιᾑἩΙ" + + "\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15\x1dἦιᾖἮΙ\x15" + + "\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ\x15+ὥιὭΙᾭ" + + "\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ\x15\x1dὣιᾣὫΙ" + + "\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰιᾺΙᾺͅ\x14#αιΑΙ" + + "ᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x12ιΙΙ\x15-ὴιῊΙ" + + "Ὴͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ\x166ῒΙ" + + "̈̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ̀\x166ΰΫ́Ϋ" + + "́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ" + + "\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12" + + "\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12" + + "\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12" + + "\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x10ʂʂ\x12\x12ffFFFf" + + "\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12st" + + "STSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄ" + + "խ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 12396 bytes (12.11 KiB). Checksum: c0656238384c3da1. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 20: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 20 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 22 blocks, 1408 entries, 2816 bytes +// The third block is the zero block. 
+var caseValues = [1408]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 
0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x110a, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x118a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x120a, + 0x19e: 0x128a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x130d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x138a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x14ca, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x160a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 
0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x168a, 0x251: 0x170a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x178a, 0x256: 0x180a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x188a, 0x271: 0x190a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x198a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x6852, 0x281: 0x6852, 0x282: 0x6852, 0x283: 0x6852, 0x284: 0x6852, 0x285: 0x6852, + 0x286: 0x6852, 0x287: 0x1a0a, 0x288: 0x0012, + 0x291: 0x0034, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0034, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0034, 0x29b: 0x0034, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, 0x2a0: 0x0024, 0x2a1: 0x0024, 0x2a2: 0x0034, 0x2a3: 0x0034, + 0x2a4: 0x0034, 0x2a5: 0x0034, 0x2a6: 0x0034, 0x2a7: 0x0034, 0x2a8: 0x0024, 0x2a9: 0x0024, + 0x2aa: 0x0034, 0x2ab: 0x0024, 0x2ac: 0x0024, 0x2ad: 0x0034, 0x2ae: 0x0034, 0x2af: 0x0024, + 0x2b0: 0x0034, 0x2b1: 0x0034, 0x2b2: 0x0034, 0x2b3: 0x0034, 0x2b4: 0x0034, 0x2b5: 0x0034, + 0x2b6: 0x0034, 0x2b7: 0x0034, 0x2b8: 0x0034, 0x2b9: 0x0034, 0x2ba: 0x0034, 0x2bb: 0x0034, + 0x2bc: 0x0034, 0x2bd: 0x0034, 0x2bf: 0x0034, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x7053, 0x2c1: 0x7053, 0x2c2: 0x7053, 0x2c3: 0x7053, 0x2c4: 0x7053, 0x2c5: 0x7053, + 0x2c7: 0x7053, + 0x2cd: 0x7053, 0x2d0: 0x1aea, 0x2d1: 0x1b6a, + 0x2d2: 0x1bea, 0x2d3: 0x1c6a, 0x2d4: 0x1cea, 0x2d5: 0x1d6a, 0x2d6: 0x1dea, 0x2d7: 0x1e6a, + 0x2d8: 0x1eea, 0x2d9: 0x1f6a, 0x2da: 0x1fea, 0x2db: 0x206a, 0x2dc: 0x20ea, 0x2dd: 0x216a, + 0x2de: 0x21ea, 0x2df: 0x226a, 0x2e0: 0x22ea, 0x2e1: 0x236a, 0x2e2: 0x23ea, 0x2e3: 0x246a, + 0x2e4: 0x24ea, 0x2e5: 0x256a, 0x2e6: 0x25ea, 0x2e7: 0x266a, 0x2e8: 0x26ea, 0x2e9: 0x276a, + 0x2ea: 0x27ea, 0x2eb: 0x286a, 0x2ec: 0x28ea, 0x2ed: 0x296a, 0x2ee: 0x29ea, 0x2ef: 0x2a6a, + 0x2f0: 0x2aea, 0x2f1: 0x2b6a, 0x2f2: 0x2bea, 0x2f3: 0x2c6a, 0x2f4: 0x2cea, 0x2f5: 0x2d6a, + 0x2f6: 0x2dea, 0x2f7: 0x2e6a, 0x2f8: 0x2eea, 0x2f9: 0x2f6a, 0x2fa: 0x2fea, + 0x2fc: 0x0014, 0x2fd: 0x306a, 0x2fe: 0x30ea, 0x2ff: 0x316a, + // Block 0xc, offset 0x300 + 0x300: 0x0812, 0x301: 0x0812, 0x302: 0x0812, 0x303: 0x0812, 0x304: 0x0812, 0x305: 0x0812, + 0x308: 0x0813, 0x309: 0x0813, 0x30a: 0x0813, 0x30b: 0x0813, + 0x30c: 0x0813, 0x30d: 0x0813, 0x310: 0x3b1a, 0x311: 0x0812, + 0x312: 0x3bfa, 0x313: 0x0812, 0x314: 0x3d3a, 0x315: 0x0812, 0x316: 0x3e7a, 0x317: 0x0812, + 0x319: 0x0813, 0x31b: 0x0813, 0x31d: 0x0813, + 0x31f: 0x0813, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x0812, 0x323: 0x0812, + 0x324: 0x0812, 0x325: 0x0812, 0x326: 0x0812, 0x327: 0x0812, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x0813, 0x32b: 0x0813, 0x32c: 0x0813, 0x32d: 0x0813, 0x32e: 0x0813, 0x32f: 0x0813, + 0x330: 0x9252, 0x331: 0x9252, 0x332: 0x9552, 0x333: 0x9552, 0x334: 0x9852, 0x335: 0x9852, + 0x336: 0x9b52, 0x337: 0x9b52, 0x338: 0x9e52, 0x339: 0x9e52, 0x33a: 0xa152, 0x33b: 0xa152, + 0x33c: 0x4d52, 0x33d: 0x4d52, + // Block 0xd, offset 0x340 + 0x340: 0x3fba, 0x341: 0x40aa, 0x342: 0x419a, 0x343: 0x428a, 0x344: 
0x437a, 0x345: 0x446a, + 0x346: 0x455a, 0x347: 0x464a, 0x348: 0x4739, 0x349: 0x4829, 0x34a: 0x4919, 0x34b: 0x4a09, + 0x34c: 0x4af9, 0x34d: 0x4be9, 0x34e: 0x4cd9, 0x34f: 0x4dc9, 0x350: 0x4eba, 0x351: 0x4faa, + 0x352: 0x509a, 0x353: 0x518a, 0x354: 0x527a, 0x355: 0x536a, 0x356: 0x545a, 0x357: 0x554a, + 0x358: 0x5639, 0x359: 0x5729, 0x35a: 0x5819, 0x35b: 0x5909, 0x35c: 0x59f9, 0x35d: 0x5ae9, + 0x35e: 0x5bd9, 0x35f: 0x5cc9, 0x360: 0x5dba, 0x361: 0x5eaa, 0x362: 0x5f9a, 0x363: 0x608a, + 0x364: 0x617a, 0x365: 0x626a, 0x366: 0x635a, 0x367: 0x644a, 0x368: 0x6539, 0x369: 0x6629, + 0x36a: 0x6719, 0x36b: 0x6809, 0x36c: 0x68f9, 0x36d: 0x69e9, 0x36e: 0x6ad9, 0x36f: 0x6bc9, + 0x370: 0x0812, 0x371: 0x0812, 0x372: 0x6cba, 0x373: 0x6dca, 0x374: 0x6e9a, + 0x376: 0x6f7a, 0x377: 0x705a, 0x378: 0x0813, 0x379: 0x0813, 0x37a: 0x9253, 0x37b: 0x9253, + 0x37c: 0x7199, 0x37d: 0x0004, 0x37e: 0x726a, 0x37f: 0x0004, + // Block 0xe, offset 0x380 + 0x380: 0x0004, 0x381: 0x0004, 0x382: 0x72ea, 0x383: 0x73fa, 0x384: 0x74ca, + 0x386: 0x75aa, 0x387: 0x768a, 0x388: 0x9553, 0x389: 0x9553, 0x38a: 0x9853, 0x38b: 0x9853, + 0x38c: 0x77c9, 0x38d: 0x0004, 0x38e: 0x0004, 0x38f: 0x0004, 0x390: 0x0812, 0x391: 0x0812, + 0x392: 0x789a, 0x393: 0x79da, 0x396: 0x7b1a, 0x397: 0x7bfa, + 0x398: 0x0813, 0x399: 0x0813, 0x39a: 0x9b53, 0x39b: 0x9b53, 0x39d: 0x0004, + 0x39e: 0x0004, 0x39f: 0x0004, 0x3a0: 0x0812, 0x3a1: 0x0812, 0x3a2: 0x7d3a, 0x3a3: 0x7e7a, + 0x3a4: 0x7fba, 0x3a5: 0x0912, 0x3a6: 0x809a, 0x3a7: 0x817a, 0x3a8: 0x0813, 0x3a9: 0x0813, + 0x3aa: 0xa153, 0x3ab: 0xa153, 0x3ac: 0x0913, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, + 0x3b2: 0x82ba, 0x3b3: 0x83ca, 0x3b4: 0x849a, + 0x3b6: 0x857a, 0x3b7: 0x865a, 0x3b8: 0x9e53, 0x3b9: 0x9e53, 0x3ba: 0x4d53, 0x3bb: 0x4d53, + 0x3bc: 0x8799, 0x3bd: 0x0004, 0x3be: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3c2: 0x0013, + 0x3c7: 0x0013, 0x3ca: 0x0012, 0x3cb: 0x0013, + 0x3cc: 0x0013, 0x3cd: 0x0013, 0x3ce: 0x0012, 0x3cf: 0x0012, 0x3d0: 0x0013, 0x3d1: 0x0013, + 0x3d2: 0x0013, 0x3d3: 0x0012, 0x3d5: 0x0013, + 0x3d9: 0x0013, 0x3da: 0x0013, 0x3db: 0x0013, 0x3dc: 0x0013, 0x3dd: 0x0013, + 0x3e4: 0x0013, 0x3e6: 0x886b, 0x3e8: 0x0013, + 0x3ea: 0x88cb, 0x3eb: 0x890b, 0x3ec: 0x0013, 0x3ed: 0x0013, 0x3ef: 0x0012, + 0x3f0: 0x0013, 0x3f1: 0x0013, 0x3f2: 0xa453, 0x3f3: 0x0013, 0x3f4: 0x0012, 0x3f5: 0x0010, + 0x3f6: 0x0010, 0x3f7: 0x0010, 0x3f8: 0x0010, 0x3f9: 0x0012, + 0x3fc: 0x0012, 0x3fd: 0x0012, 0x3fe: 0x0013, 0x3ff: 0x0013, + // Block 0x10, offset 0x400 + 0x400: 0x1a13, 0x401: 0x1a13, 0x402: 0x1e13, 0x403: 0x1e13, 0x404: 0x1a13, 0x405: 0x1a13, + 0x406: 0x2613, 0x407: 0x2613, 0x408: 0x2a13, 0x409: 0x2a13, 0x40a: 0x2e13, 0x40b: 0x2e13, + 0x40c: 0x2a13, 0x40d: 0x2a13, 0x40e: 0x2613, 0x40f: 0x2613, 0x410: 0xa752, 0x411: 0xa752, + 0x412: 0xaa52, 0x413: 0xaa52, 0x414: 0xad52, 0x415: 0xad52, 0x416: 0xaa52, 0x417: 0xaa52, + 0x418: 0xa752, 0x419: 0xa752, 0x41a: 0x1a12, 0x41b: 0x1a12, 0x41c: 0x1e12, 0x41d: 0x1e12, + 0x41e: 0x1a12, 0x41f: 0x1a12, 0x420: 0x2612, 0x421: 0x2612, 0x422: 0x2a12, 0x423: 0x2a12, + 0x424: 0x2e12, 0x425: 0x2e12, 0x426: 0x2a12, 0x427: 0x2a12, 0x428: 0x2612, 0x429: 0x2612, + // Block 0x11, offset 0x440 + 0x440: 0x6552, 0x441: 0x6552, 0x442: 0x6552, 0x443: 0x6552, 0x444: 0x6552, 0x445: 0x6552, + 0x446: 0x6552, 0x447: 0x6552, 0x448: 0x6552, 0x449: 0x6552, 0x44a: 0x6552, 0x44b: 0x6552, + 0x44c: 0x6552, 0x44d: 0x6552, 0x44e: 0x6552, 0x44f: 0x6552, 0x450: 0xb052, 0x451: 0xb052, + 0x452: 0xb052, 0x453: 0xb052, 0x454: 0xb052, 0x455: 0xb052, 0x456: 0xb052, 0x457: 0xb052, + 0x458: 0xb052, 0x459: 0xb052, 0x45a: 
0xb052, 0x45b: 0xb052, 0x45c: 0xb052, 0x45d: 0xb052, + 0x45e: 0xb052, 0x460: 0x0113, 0x461: 0x0112, 0x462: 0x896b, 0x463: 0x8b53, + 0x464: 0x89cb, 0x465: 0x8a2a, 0x466: 0x8a8a, 0x467: 0x0f13, 0x468: 0x0f12, 0x469: 0x0313, + 0x46a: 0x0312, 0x46b: 0x0713, 0x46c: 0x0712, 0x46d: 0x8aeb, 0x46e: 0x8b4b, 0x46f: 0x8bab, + 0x470: 0x8c0b, 0x471: 0x0012, 0x472: 0x0113, 0x473: 0x0112, 0x474: 0x0012, 0x475: 0x0313, + 0x476: 0x0312, 0x477: 0x0012, 0x478: 0x0012, 0x479: 0x0012, 0x47a: 0x0012, 0x47b: 0x0012, + 0x47c: 0x0015, 0x47d: 0x0015, 0x47e: 0x8c6b, 0x47f: 0x8ccb, + // Block 0x12, offset 0x480 + 0x480: 0x0113, 0x481: 0x0112, 0x482: 0x0113, 0x483: 0x0112, 0x484: 0x0113, 0x485: 0x0112, + 0x486: 0x0113, 0x487: 0x0112, 0x488: 0x0014, 0x489: 0x0014, 0x48a: 0x0014, 0x48b: 0x0713, + 0x48c: 0x0712, 0x48d: 0x8d2b, 0x48e: 0x0012, 0x48f: 0x0010, 0x490: 0x0113, 0x491: 0x0112, + 0x492: 0x0113, 0x493: 0x0112, 0x494: 0x6552, 0x495: 0x0012, 0x496: 0x0113, 0x497: 0x0112, + 0x498: 0x0113, 0x499: 0x0112, 0x49a: 0x0113, 0x49b: 0x0112, 0x49c: 0x0113, 0x49d: 0x0112, + 0x49e: 0x0113, 0x49f: 0x0112, 0x4a0: 0x0113, 0x4a1: 0x0112, 0x4a2: 0x0113, 0x4a3: 0x0112, + 0x4a4: 0x0113, 0x4a5: 0x0112, 0x4a6: 0x0113, 0x4a7: 0x0112, 0x4a8: 0x0113, 0x4a9: 0x0112, + 0x4aa: 0x8d8b, 0x4ab: 0x8deb, 0x4ac: 0x8e4b, 0x4ad: 0x8eab, 0x4ae: 0x8f0b, 0x4af: 0x0012, + 0x4b0: 0x8f6b, 0x4b1: 0x8fcb, 0x4b2: 0x902b, 0x4b3: 0xb353, 0x4b4: 0x0113, 0x4b5: 0x0112, + 0x4b6: 0x0113, 0x4b7: 0x0112, 0x4b8: 0x0113, 0x4b9: 0x0112, 0x4ba: 0x0113, 0x4bb: 0x0112, + 0x4bc: 0x0113, 0x4bd: 0x0112, 0x4be: 0x0113, 0x4bf: 0x0112, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x90ea, 0x4c1: 0x916a, 0x4c2: 0x91ea, 0x4c3: 0x926a, 0x4c4: 0x931a, 0x4c5: 0x93ca, + 0x4c6: 0x944a, + 0x4d3: 0x94ca, 0x4d4: 0x95aa, 0x4d5: 0x968a, 0x4d6: 0x976a, 0x4d7: 0x984a, + 0x4dd: 0x0010, + 0x4de: 0x0034, 0x4df: 0x0010, 0x4e0: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, 0x4e3: 0x0010, + 0x4e4: 0x0010, 0x4e5: 0x0010, 0x4e6: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, + 0x4ea: 0x0010, 0x4eb: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f3: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f8: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, + // Block 0x14, offset 0x500 + 0x500: 0x2213, 0x501: 0x2213, 0x502: 0x2613, 0x503: 0x2613, 0x504: 0x2213, 0x505: 0x2213, + 0x506: 0x2e13, 0x507: 0x2e13, 0x508: 0x2213, 0x509: 0x2213, 0x50a: 0x2613, 0x50b: 0x2613, + 0x50c: 0x2213, 0x50d: 0x2213, 0x50e: 0x3e13, 0x50f: 0x3e13, 0x510: 0x2213, 0x511: 0x2213, + 0x512: 0x2613, 0x513: 0x2613, 0x514: 0x2213, 0x515: 0x2213, 0x516: 0x2e13, 0x517: 0x2e13, + 0x518: 0x2213, 0x519: 0x2213, 0x51a: 0x2613, 0x51b: 0x2613, 0x51c: 0x2213, 0x51d: 0x2213, + 0x51e: 0xbc53, 0x51f: 0xbc53, 0x520: 0xbf53, 0x521: 0xbf53, 0x522: 0x2212, 0x523: 0x2212, + 0x524: 0x2612, 0x525: 0x2612, 0x526: 0x2212, 0x527: 0x2212, 0x528: 0x2e12, 0x529: 0x2e12, + 0x52a: 0x2212, 0x52b: 0x2212, 0x52c: 0x2612, 0x52d: 0x2612, 0x52e: 0x2212, 0x52f: 0x2212, + 0x530: 0x3e12, 0x531: 0x3e12, 0x532: 0x2212, 0x533: 0x2212, 0x534: 0x2612, 0x535: 0x2612, + 0x536: 0x2212, 0x537: 0x2212, 0x538: 0x2e12, 0x539: 0x2e12, 0x53a: 0x2212, 0x53b: 0x2212, + 0x53c: 0x2612, 0x53d: 0x2612, 0x53e: 0x2212, 0x53f: 0x2212, + // Block 0x15, offset 0x540 + 0x542: 0x0010, + 0x547: 0x0010, 0x549: 0x0010, 0x54b: 0x0010, + 0x54d: 0x0010, 0x54e: 0x0010, 0x54f: 0x0010, 0x551: 0x0010, + 0x552: 0x0010, 0x554: 0x0010, 0x557: 0x0010, + 0x559: 0x0010, 0x55b: 0x0010, 0x55d: 0x0010, + 0x55f: 0x0010, 0x561: 
0x0010, 0x562: 0x0010, + 0x564: 0x0010, 0x567: 0x0010, 0x568: 0x0010, 0x569: 0x0010, + 0x56a: 0x0010, 0x56c: 0x0010, 0x56d: 0x0010, 0x56e: 0x0010, 0x56f: 0x0010, + 0x570: 0x0010, 0x571: 0x0010, 0x572: 0x0010, 0x574: 0x0010, 0x575: 0x0010, + 0x576: 0x0010, 0x577: 0x0010, 0x579: 0x0010, 0x57a: 0x0010, 0x57b: 0x0010, + 0x57c: 0x0010, 0x57e: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. +var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x14, 0xc3: 0x15, 0xc4: 0x16, 0xc5: 0x17, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x18, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x19, 0xcc: 0x1a, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x1b, 0xd1: 0x1c, 0xd2: 0x1d, 0xd3: 0x1e, 0xd4: 0x1f, 0xd5: 0x20, 0xd6: 0x08, 0xd7: 0x21, + 0xd8: 0x22, 0xd9: 0x23, 0xda: 0x24, 0xdb: 0x25, 0xdc: 0x26, 0xdd: 0x27, 0xde: 0x28, 0xdf: 0x29, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x2a, 0x121: 0x2b, 0x122: 0x2c, 0x123: 0x2d, 0x124: 0x2e, 0x125: 0x2f, 0x126: 0x30, 0x127: 0x31, + 0x128: 0x32, 0x129: 0x33, 0x12a: 0x34, 0x12b: 0x35, 0x12c: 0x36, 0x12d: 0x37, 0x12e: 0x38, 0x12f: 0x39, + 0x130: 0x3a, 0x131: 0x3b, 0x132: 0x3c, 0x133: 0x3d, 0x134: 0x3e, 0x135: 0x3f, 0x136: 0x40, 0x137: 0x41, + 0x138: 0x42, 0x139: 0x43, 0x13a: 0x44, 0x13b: 0x45, 0x13c: 0x46, 0x13d: 0x47, 0x13e: 0x48, 0x13f: 0x49, + // Block 0x5, offset 0x140 + 0x140: 0x4a, 0x141: 0x4b, 0x142: 0x4c, 0x143: 0x09, 0x144: 0x24, 0x145: 0x24, 0x146: 0x24, 0x147: 0x24, + 0x148: 0x24, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x24, 0x152: 0x24, 0x153: 0x24, 0x154: 0x24, 0x155: 0x24, 0x156: 0x24, 0x157: 0x24, + 0x158: 0x24, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x0a, 0x17e: 0x0b, 0x17f: 0x0c, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0d, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0e, + 0x1b0: 0x7c, 0x1b1: 0x0f, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x24, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x24, 0x202: 0x24, 0x203: 0x24, 0x204: 0x24, 0x205: 0x24, 0x206: 0x24, 0x207: 0x24, + 0x208: 0x24, 0x209: 0x24, 0x20a: 0x24, 0x20b: 0x24, 0x20c: 0x24, 0x20d: 0x24, 0x20e: 0x24, 0x20f: 0x24, + 0x210: 0x24, 0x211: 0x24, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x24, 0x215: 0x24, 0x216: 0x24, 0x217: 0x24, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x10, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x24, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x24, 0x231: 0x24, 0x232: 0x24, 0x233: 0x24, 0x234: 0x24, 0x235: 0x24, 0x236: 0x24, 0x237: 0x24, + 0x238: 0x24, 
0x239: 0x24, 0x23a: 0x24, 0x23b: 0x24, 0x23c: 0x24, 0x23d: 0x24, 0x23e: 0x24, 0x23f: 0x24, + // Block 0x9, offset 0x240 + 0x240: 0x24, 0x241: 0x24, 0x242: 0x24, 0x243: 0x24, 0x244: 0x24, 0x245: 0x24, 0x246: 0x24, 0x247: 0x24, + 0x248: 0x24, 0x249: 0x24, 0x24a: 0x24, 0x24b: 0x24, 0x24c: 0x24, 0x24d: 0x24, 0x24e: 0x24, 0x24f: 0x24, + 0x250: 0x24, 0x251: 0x24, 0x252: 0x24, 0x253: 0x24, 0x254: 0x24, 0x255: 0x24, 0x256: 0x24, 0x257: 0x24, + 0x258: 0x24, 0x259: 0x24, 0x25a: 0x24, 0x25b: 0x24, 0x25c: 0x24, 0x25d: 0x24, 0x25e: 0x24, 0x25f: 0x24, + 0x260: 0x24, 0x261: 0x24, 0x262: 0x24, 0x263: 0x24, 0x264: 0x24, 0x265: 0x24, 0x266: 0x24, 0x267: 0x24, + 0x268: 0x24, 0x269: 0x24, 0x26a: 0x24, 0x26b: 0x24, 0x26c: 0x24, 0x26d: 0x24, 0x26e: 0x24, 0x26f: 0x24, + 0x270: 0x24, 0x271: 0x24, 0x272: 0x24, 0x273: 0x24, 0x274: 0x24, 0x275: 0x24, 0x276: 0x24, 0x277: 0x24, + 0x278: 0x24, 0x279: 0x24, 0x27a: 0x24, 0x27b: 0x24, 0x27c: 0x24, 0x27d: 0x24, 0x27e: 0x24, 0x27f: 0x24, + // Block 0xa, offset 0x280 + 0x280: 0x24, 0x281: 0x24, 0x282: 0x24, 0x283: 0x24, 0x284: 0x24, 0x285: 0x24, 0x286: 0x24, 0x287: 0x24, + 0x288: 0x24, 0x289: 0x24, 0x28a: 0x24, 0x28b: 0x24, 0x28c: 0x24, 0x28d: 0x24, 0x28e: 0x24, 0x28f: 0x24, + 0x290: 0x24, 0x291: 0x24, 0x292: 0x24, 0x293: 0x24, 0x294: 0x24, 0x295: 0x24, 0x296: 0x24, 0x297: 0x24, + 0x298: 0x24, 0x299: 0x24, 0x29a: 0x24, 0x29b: 0x24, 0x29c: 0x24, 0x29d: 0x24, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x11, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x24, 0x2f1: 0x24, 0x2f2: 0x24, 0x2f3: 0x24, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x24, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x24, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x24, 0x319: 0x24, 0x31a: 0x24, 0x31b: 0x24, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x24, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, 0x334: 0xd3, + 0x33c: 0xd4, 0x33d: 0xd5, 0x33f: 0xd6, + // Block 0xd, offset 0x340 + 0x340: 0xd7, 0x341: 0xd8, 0x342: 0xd9, 0x343: 0xda, 0x344: 0xdb, 0x345: 0xdc, 0x346: 0xdd, 0x347: 0xde, + 0x348: 0xdf, 0x34a: 0xe0, 0x34b: 0xe1, 0x34c: 0xe2, 0x34d: 0xe3, + 0x350: 0xe4, 0x351: 0xe5, 0x352: 0xe6, 0x353: 0xe7, 0x356: 0xe8, 0x357: 0xe9, + 0x358: 0xea, 0x359: 0xeb, 0x35a: 0xec, 0x35b: 0xed, 0x35c: 0xee, + 0x360: 0xef, 0x362: 0xf0, 0x363: 0xf1, 0x366: 0xf2, 0x367: 0xf3, + 0x368: 0xf4, 0x369: 0xf5, 0x36a: 0xf6, 0x36b: 0xf7, + 0x370: 0xf8, 0x371: 0xf9, 0x372: 0xfa, 0x374: 0xfb, 0x375: 0xfc, 0x376: 0xfd, + 0x37b: 0xfe, + // Block 0xe, offset 0x380 + 0x380: 0x24, 0x381: 0x24, 0x382: 0x24, 0x383: 0x24, 0x384: 0x24, 0x385: 0x24, 0x386: 0x24, 0x387: 0x24, + 0x388: 0x24, 0x389: 0x24, 0x38a: 0x24, 0x38b: 0x24, 0x38c: 0x24, 0x38d: 0x24, 0x38e: 0xff, + 0x390: 0x24, 0x391: 0x100, 0x392: 0x24, 0x393: 0x24, 0x394: 0x24, 0x395: 0x101, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x24, 0x3c1: 0x24, 0x3c2: 0x24, 0x3c3: 0x24, 0x3c4: 0x24, 0x3c5: 0x24, 0x3c6: 0x24, 0x3c7: 0x24, + 0x3c8: 0x24, 0x3c9: 0x24, 0x3ca: 0x24, 0x3cb: 0x24, 0x3cc: 0x24, 0x3cd: 0x24, 0x3ce: 0x24, 0x3cf: 0x24, + 0x3d0: 0x102, + // Block 0x10, offset 0x400 + 0x410: 0x24, 0x411: 0x24, 0x412: 0x24, 
0x413: 0x24, 0x414: 0x24, 0x415: 0x24, 0x416: 0x24, 0x417: 0x24, + 0x418: 0x24, 0x419: 0x103, + // Block 0x11, offset 0x440 + 0x460: 0x24, 0x461: 0x24, 0x462: 0x24, 0x463: 0x24, 0x464: 0x24, 0x465: 0x24, 0x466: 0x24, 0x467: 0x24, + 0x468: 0xf7, 0x469: 0x104, 0x46b: 0x105, 0x46c: 0x106, 0x46d: 0x107, 0x46e: 0x108, + 0x479: 0x109, 0x47c: 0x24, 0x47d: 0x10a, 0x47e: 0x10b, 0x47f: 0x10c, + // Block 0x12, offset 0x480 + 0x4b0: 0x24, 0x4b1: 0x10d, 0x4b2: 0x10e, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x10f, 0x4c6: 0x110, + 0x4c9: 0x111, + 0x4d0: 0x112, 0x4d1: 0x113, 0x4d2: 0x114, 0x4d3: 0x115, 0x4d4: 0x116, 0x4d5: 0x117, 0x4d6: 0x118, 0x4d7: 0x119, + 0x4d8: 0x11a, 0x4d9: 0x11b, 0x4da: 0x11c, 0x4db: 0x11d, 0x4dc: 0x11e, 0x4dd: 0x11f, 0x4de: 0x120, 0x4df: 0x121, + 0x4e8: 0x122, 0x4e9: 0x123, 0x4ea: 0x124, + // Block 0x14, offset 0x500 + 0x500: 0x125, 0x504: 0x126, 0x505: 0x127, + 0x50b: 0x128, + 0x520: 0x24, 0x521: 0x24, 0x522: 0x24, 0x523: 0x129, 0x524: 0x12, 0x525: 0x12a, + 0x538: 0x12b, 0x539: 0x13, 0x53a: 0x12c, + // Block 0x15, offset 0x540 + 0x544: 0x12d, 0x545: 0x12e, 0x546: 0x12f, + 0x54f: 0x130, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x131, 0x5c1: 0x132, 0x5c4: 0x132, 0x5c5: 0x132, 0x5c6: 0x132, 0x5c7: 0x133, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 289 entries, 578 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x35, 0x38, 0x3c, 0x3f, 0x43, 0x4d, 0x4f, 0x57, 0x5e, 0x63, 0x71, 0x72, 0x80, 0x8f, 0x99, 0x9c, 0xa3, 0xab, 0xae, 0xb0, 0xbf, 0xc5, 0xd3, 0xde, 0xeb, 0xf6, 0x102, 0x10c, 0x118, 0x123, 0x12f, 0x13b, 0x143, 0x14c, 0x156, 0x161, 0x16d, 0x174, 0x17f, 0x184, 0x18c, 0x18f, 0x194, 0x198, 0x19c, 0x1a3, 0x1ac, 0x1b4, 0x1b5, 0x1be, 0x1c5, 0x1cd, 0x1d3, 0x1d8, 0x1dc, 0x1df, 0x1e1, 0x1e4, 0x1e9, 0x1ea, 0x1ec, 0x1ee, 0x1f0, 0x1f7, 0x1fc, 0x200, 0x209, 0x20c, 0x20f, 0x215, 0x216, 0x221, 0x222, 0x223, 0x228, 0x235, 0x23d, 0x245, 0x24e, 0x257, 0x260, 0x265, 0x268, 0x273, 0x281, 0x283, 0x28a, 0x28e, 0x29a, 0x29b, 0x2a6, 0x2ae, 0x2b6, 0x2bc, 0x2bd, 0x2cb, 0x2d0, 0x2d3, 0x2d8, 0x2dc, 0x2e2, 0x2e7, 0x2ea, 0x2ef, 0x2f4, 0x2f5, 0x2fb, 0x2fd, 0x2fe, 0x300, 0x302, 0x305, 0x306, 0x308, 0x30b, 0x311, 0x315, 0x317, 0x31c, 0x323, 0x32b, 0x334, 0x335, 0x33e, 0x342, 0x347, 0x34f, 0x355, 0x35b, 0x365, 0x36a, 0x373, 0x379, 0x380, 0x384, 0x38c, 0x38e, 0x390, 0x393, 0x395, 0x397, 0x398, 0x399, 0x39b, 0x39d, 0x3a3, 0x3a8, 0x3aa, 0x3b1, 0x3b4, 0x3b6, 0x3bc, 0x3c1, 0x3c3, 0x3c4, 0x3c5, 0x3c6, 0x3c8, 0x3ca, 0x3cc, 0x3cf, 0x3d1, 0x3d4, 0x3dc, 0x3df, 0x3e3, 0x3eb, 0x3ed, 0x3ee, 0x3ef, 0x3f1, 0x3f7, 0x3f9, 0x3fa, 0x3fc, 0x3fe, 0x400, 0x40d, 0x40e, 0x40f, 0x413, 0x415, 0x416, 0x417, 0x418, 0x419, 0x41c, 0x41f, 0x425, 0x426, 0x42a, 0x42e, 0x434, 0x437, 0x43e, 0x442, 0x446, 0x44d, 0x456, 0x45c, 0x462, 0x46c, 0x476, 0x478, 0x481, 0x487, 0x48d, 0x493, 0x496, 0x49c, 0x49f, 0x4a8, 0x4a9, 0x4b0, 0x4b4, 0x4b5, 0x4b8, 0x4ba, 0x4c1, 0x4c9, 0x4cf, 0x4d5, 0x4d6, 0x4dc, 0x4df, 0x4e7, 0x4ee, 0x4f8, 0x500, 0x503, 0x504, 0x505, 0x506, 0x508, 0x509, 0x50b, 0x50d, 0x50f, 0x513, 0x514, 0x516, 0x519, 0x51b, 0x51d, 0x51f, 0x524, 0x529, 0x52d, 0x52e, 0x531, 0x535, 0x540, 0x544, 0x54c, 0x551, 0x555, 0x558, 0x55c, 0x55f, 0x562, 0x567, 0x56b, 0x56f, 0x573, 0x577, 0x579, 0x57b, 0x57e, 0x583, 0x586, 0x588, 0x58b, 0x58d, 0x593, 0x59c, 0x5a1, 0x5a2, 0x5a5, 0x5a6, 0x5a7, 0x5a9, 0x5aa, 0x5ab} + +// sparseValues: 1451 entries, 5804 bytes +var 
sparseValues = [1451]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xbf}, + // Block 0x6, offset 0x35 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x38 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3c + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3f + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x43 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4d + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4f + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + 
{value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9b, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0xa0, hi: 0xa0}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x57 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xaf, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xe, offset 0x5e + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xf, offset 0x63 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x10, offset 0x71 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x11, offset 0x72 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x12, offset 0x80 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x14, offset 0x99 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x15, offset 0x9c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0x16, offset 0xa3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 
0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x17, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x18, offset 0xae + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x19, offset 0xb0 + {value: 0x0034, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1a, offset 0xbf + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1b, offset 0xc5 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1c, offset 0xd3 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0xde + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xeb + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x1f, offset 0xf6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 
0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x20, offset 0x102 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x21, offset 0x10c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x22, offset 0x118 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x23, offset 0x123 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x24, offset 0x12f + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x26, offset 0x143 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x27, offset 0x14c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 
0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x28, offset 0x156 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x29, offset 0x161 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2a, offset 0x16d + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2b, offset 0x174 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2c, offset 0x17f + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2d, offset 0x184 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2e, offset 0x18c + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x2f, offset 0x18f + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x30, offset 0x194 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x31, offset 0x198 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x32, offset 0x19c + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 
0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x33, offset 0x1a3 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x34, offset 0x1ac + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x35, offset 0x1b4 + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x36, offset 0x1b5 + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x37, offset 0x1be + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x38, offset 0x1c5 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x39, offset 0x1cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3b, offset 0x1d8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3c, offset 0x1dc + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3d, offset 0x1df + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x3e, offset 0x1e1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x3f, offset 0x1e4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x40, offset 0x1e9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x41, offset 0x1ea + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x42, offset 0x1ec + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x43, offset 0x1ee + {value: 0x0010, lo: 0x80, hi: 
0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x44, offset 0x1f0 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x45, offset 0x1f7 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x46, offset 0x1fc + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x47, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x48, offset 0x209 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x20c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb8}, + // Block 0x4a, offset 0x20f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4b, offset 0x215 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4c, offset 0x216 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x221 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x4e, offset 0x222 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x4f, offset 0x223 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x50, offset 0x228 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x51, offset 0x235 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x52, offset 0x23d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 
0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x54, offset 0x24e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x55, offset 0x257 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x56, offset 0x260 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x57, offset 0x265 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x58, offset 0x268 + {value: 0x31ea, lo: 0x80, hi: 0x80}, + {value: 0x326a, lo: 0x81, hi: 0x81}, + {value: 0x32ea, lo: 0x82, hi: 0x82}, + {value: 0x336a, lo: 0x83, hi: 0x83}, + {value: 0x33ea, lo: 0x84, hi: 0x84}, + {value: 0x346a, lo: 0x85, hi: 0x85}, + {value: 0x34ea, lo: 0x86, hi: 0x86}, + {value: 0x356a, lo: 0x87, hi: 0x87}, + {value: 0x35ea, lo: 0x88, hi: 0x88}, + {value: 0x8353, lo: 0x90, hi: 0xba}, + {value: 0x8353, lo: 0xbd, hi: 0xbf}, + // Block 0x59, offset 0x273 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xba}, + // Block 0x5a, offset 0x281 + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5b, offset 0x283 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8752, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8b52, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5c, offset 0x28a + {value: 0x0012, lo: 0x80, hi: 0x8d}, + {value: 0x8f52, lo: 0x8e, hi: 0x8e}, + {value: 0x0012, lo: 0x8f, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5d, offset 0x28e + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 
0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x5e, offset 0x29a + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x29b + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x369a, lo: 0x96, hi: 0x96}, + {value: 0x374a, lo: 0x97, hi: 0x97}, + {value: 0x37fa, lo: 0x98, hi: 0x98}, + {value: 0x38aa, lo: 0x99, hi: 0x99}, + {value: 0x395a, lo: 0x9a, hi: 0x9a}, + {value: 0x3a0a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x3abb, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x60, offset 0x2a6 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x61, offset 0x2ae + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x2b6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x2bc + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x64, offset 0x2bd + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x65, offset 0x2cb + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0xa452, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x2d0 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x67, offset 0x2d3 + {value: 0xa753, lo: 0xb6, hi: 0xb7}, + {value: 0xaa53, lo: 0xb8, hi: 0xb9}, + {value: 0xad53, lo: 0xba, hi: 0xbb}, + {value: 0xaa53, lo: 0xbc, hi: 0xbd}, + {value: 0xa753, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x2d8 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xb053, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x69, offset 0x2dc + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6a, offset 
0x2e2 + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e7 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6c, offset 0x2ea + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6d, offset 0x2ef + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x6e, offset 0x2f4 + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x6f, offset 0x2f5 + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x70, offset 0x2fb + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x71, offset 0x2fd + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x72, offset 0x2fe + {value: 0x0010, lo: 0x85, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x73, offset 0x300 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x74, offset 0x302 + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x75, offset 0x305 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x76, offset 0x306 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x77, offset 0x308 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x78, offset 0x30b + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x79, offset 0x311 + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7a, offset 0x315 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7b, offset 0x317 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7c, offset 0x31c + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8753, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7d, offset 0x323 + {value: 0x0117, lo: 0x82, hi: 0x83}, + {value: 0x6553, lo: 0x84, hi: 0x84}, + {value: 0x908b, lo: 0x85, hi: 0x85}, + {value: 0x8f53, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x7e, offset 0x32b + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 
0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x7f, offset 0x334 + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x80, offset 0x335 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x33e + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x82, offset 0x342 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x83, offset 0x347 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x84, offset 0x34f + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x85, offset 0x355 + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x86, offset 0x35b + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x87, offset 0x365 + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x36a + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x89, offset 0x373 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8a, offset 0x379 + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xb352, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa7}, + {value: 0x74d2, 
lo: 0xb0, hi: 0xbf}, + // Block 0x8b, offset 0x380 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x384 + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8d, offset 0x38c + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x38e + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x8f, offset 0x390 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x90, offset 0x393 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x91, offset 0x395 + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x92, offset 0x397 + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x93, offset 0x398 + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x94, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x95, offset 0x39b + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x96, offset 0x39d + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x97, offset 0x3a3 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x98, offset 0x3a8 + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x3aa + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x3b1 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9b, offset 0x3b4 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9c, offset 0x3b6 + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9d, offset 0x3bc + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x3c1 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0x9f, offset 0x3c3 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa0, offset 0x3c4 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa1, offset 0x3c5 + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa2, offset 0x3c6 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // 
Block 0xa3, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa4, offset 0x3ca + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa5, offset 0x3cc + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa6, offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa7, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xa8, offset 0x3d4 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xb653, lo: 0x98, hi: 0x9f}, + {value: 0xb953, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xa9, offset 0x3dc + {value: 0xb652, lo: 0x80, hi: 0x87}, + {value: 0xb952, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xaa, offset 0x3df + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb953, lo: 0xb0, hi: 0xb7}, + {value: 0xb653, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e3 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb952, lo: 0x98, hi: 0x9f}, + {value: 0xb652, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xac, offset 0x3eb + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xad, offset 0x3ed + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xae, offset 0x3ee + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xaf, offset 0x3ef + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb0, offset 0x3f1 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x3f7 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb2, offset 0x3f9 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb3, offset 0x3fa + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb4, offset 0x3fc + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb5, offset 0x3fe + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb6, offset 0x400 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb5}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb7, offset 0x40d + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xb8, offset 0x40e + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xb9, offset 0x40f + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 
0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xba, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbb, offset 0x415 + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbc, offset 0x416 + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbd, offset 0x417 + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xbe, offset 0x418 + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xbf, offset 0x419 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc0, offset 0x41c + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc1, offset 0x41f + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x87}, + {value: 0x0024, lo: 0x88, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x8b}, + {value: 0x0024, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + // Block 0xc2, offset 0x425 + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xc3, offset 0x426 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc4, offset 0x42a + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc5, offset 0x42e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc6, offset 0x434 + {value: 0x0014, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc7, offset 0x437 + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc8, offset 0x43e + {value: 0x0010, lo: 0x84, hi: 0x86}, + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc9, offset 0x442 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x446 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x89, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xcb, offset 0x44d + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xcc, offset 0x456 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcd, offset 0x45c + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 
0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xce, offset 0x462 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcf, offset 0x46c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xd0, offset 0x476 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xd1, offset 0x478 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0x9f, hi: 0x9f}, + // Block 0xd2, offset 0x481 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x487 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd4, offset 0x48d + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x493 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd6, offset 0x496 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd7, offset 0x49c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd8, offset 0x49f + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + // Block 0xd9, offset 0x4a8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xda, offset 0x4a9 + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 
0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xdb, offset 0x4b0 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + // Block 0xdc, offset 0x4b4 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xdd, offset 0x4b5 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x4b8 + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xdf, offset 0x4ba + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0014, lo: 0x94, hi: 0x97}, + {value: 0x0014, lo: 0x9a, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0x9f}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + // Block 0xe0, offset 0x4c1 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xe1, offset 0x4c9 + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xe2, offset 0x4cf + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + // Block 0xe3, offset 0x4d5 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xe4, offset 0x4d6 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe5, offset 0x4dc + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x4df + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xe7, offset 0x4e7 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xe8, offset 0x4ee + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa5}, + {value: 0x0010, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xe9, offset 0x4f8 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0x96}, + {value: 0x0034, lo: 0x97, hi: 
0x97}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xea, offset 0x500 + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + // Block 0xeb, offset 0x503 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xec, offset 0x504 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xed, offset 0x505 + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xee, offset 0x506 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xb0, hi: 0xb8}, + // Block 0xef, offset 0x508 + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xf0, offset 0x509 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xf1, offset 0x50b + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xf2, offset 0x50d + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xf3, offset 0x50f + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xf4, offset 0x513 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xf5, offset 0x514 + {value: 0x2013, lo: 0x80, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xbf}, + // Block 0xf6, offset 0x516 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x519 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xf8, offset 0x51b + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa3, hi: 0xa3}, + // Block 0xf9, offset 0x51d + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xfa, offset 0x51f + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xfb, offset 0x524 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xfc, offset 0x529 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xfd, offset 0x52d + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xfe, offset 0x52e + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xff, offset 0x531 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x100, offset 0x535 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0x101, offset 0x540 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x102, offset 0x544 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 
0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0x103, offset 0x54c + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0x104, offset 0x551 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0x105, offset 0x555 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0x106, offset 0x558 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0x107, offset 0x55c + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x108, offset 0x55f + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x109, offset 0x562 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x10a, offset 0x567 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x10b, offset 0x56b + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x10c, offset 0x56f + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x10d, offset 0x573 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x10e, offset 0x577 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x10f, offset 0x579 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x110, offset 0x57b + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x111, offset 0x57e + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x112, offset 0x583 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + // Block 0x113, offset 0x586 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + // Block 0x114, offset 0x588 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0024, lo: 0xac, hi: 0xaf}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x115, offset 0x58b + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x116, offset 0x58d + {value: 0xbc52, lo: 0x80, hi: 0x81}, + {value: 0xbf52, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 
0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x117, offset 0x593 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x118, offset 0x59c + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x119, offset 0x5a1 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x11a, offset 0x5a2 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x11b, offset 0x5a5 + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x11c, offset 0x5a6 + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x11d, offset 0x5a7 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x11e, offset 0x5a9 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x11f, offset 0x5aa + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 15070 bytes (14KiB); checksum: 1EB13752 diff --git a/vendor/golang.org/x/text/cases/tables13.0.0.go b/vendor/golang.org/x/text/cases/tables13.0.0.go new file mode 100644 index 00000000..6187e6b4 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables13.0.0.go @@ -0,0 +1,2399 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.16 && !go1.21 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +var xorData string = "" + // Size: 192 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" 
+ + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x001\x00\x00\x0b(\x04\x00\x03\x04\x1e\x00\x0b)\x08" + + "\x00\x03\x0a\x00\x02:\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<" + + "\x00\x01&\x00\x01*\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01" + + "\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2450 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꟅꟅ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ι" + + "ΙΙ\x166ΐΪ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12" + + "φΦΦ\x12\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x10\x1bᲐა" + + "\x10\x1bᲑბ\x10\x1bᲒგ\x10\x1bᲓდ\x10\x1bᲔე\x10\x1bᲕვ\x10\x1bᲖზ\x10\x1bᲗთ" + + "\x10\x1bᲘი\x10\x1bᲙკ\x10\x1bᲚლ\x10\x1bᲛმ\x10\x1bᲜნ\x10\x1bᲝო\x10\x1bᲞპ" + + "\x10\x1bᲟჟ\x10\x1bᲠრ\x10\x1bᲡს\x10\x1bᲢტ\x10\x1bᲣუ\x10\x1bᲤფ\x10\x1bᲥქ" + + "\x10\x1bᲦღ\x10\x1bᲧყ\x10\x1bᲨშ\x10\x1bᲩჩ\x10\x1bᲪც\x10\x1bᲫძ\x10\x1bᲬწ" + + "\x10\x1bᲭჭ\x10\x1bᲮხ\x10\x1bᲯჯ\x10\x1bᲰჰ\x10\x1bᲱჱ\x10\x1bᲲჲ\x10\x1bᲳჳ" + + "\x10\x1bᲴჴ\x10\x1bᲵჵ\x10\x1bᲶჶ\x10\x1bᲷჷ\x10\x1bᲸჸ\x10\x1bᲹჹ\x10\x1bᲺჺ" + + "\x10\x1bᲽჽ\x10\x1bᲾჾ\x10\x1bᲿჿ\x12\x12вВВ\x12\x12дДД\x12\x12оОО\x12\x12с" + + "СС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13\x1bꙋꙊꙊ\x13\x1bẖH̱H̱" + + "\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1baʾAʾAʾ\x13\x1bṡṠṠ\x12" + + "\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ" + + "\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ" + + "\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ\x15\x1dἄιᾄἌΙ\x15" + + "\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ\x15+ἢιἪΙᾚ\x15+ἣι" + + "ἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨΙ\x15\x1dἡιᾑἩΙ" + + "\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15\x1dἦιᾖἮΙ\x15" + + "\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ\x15+ὥιὭΙᾭ" + + "\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ\x15\x1dὣιᾣὫΙ" + + "\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰιᾺΙᾺͅ\x14#αιΑΙ" + + "ᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x12ιΙΙ\x15-ὴιῊΙ" + + "Ὴͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ\x166ῒΙ" + + "̈̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ̀\x166ΰΫ́Ϋ" + + "́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ" + + "\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12" + + "\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12" + + "\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12" + + "\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x10ʂʂ\x12\x12ffFFFf" + + "\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12st" + + "STSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄ" + + "խ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. 
The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 12538 bytes (12.24 KiB). Checksum: af4dfa7d60c71d4c. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 20: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 20 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 22 blocks, 1408 entries, 2816 bytes +// The third block is the zero block. +var caseValues = [1408]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 
0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x110a, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x118a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x120a, + 0x19e: 0x128a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x130d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 
0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x138a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x14ca, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x160a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x168a, 0x251: 0x170a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x178a, 0x256: 0x180a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x188a, 0x271: 0x190a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x198a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x6852, 0x281: 0x6852, 0x282: 0x6852, 0x283: 0x6852, 0x284: 0x6852, 0x285: 0x6852, + 0x286: 0x6852, 0x287: 0x1a0a, 0x288: 0x0012, 0x28a: 0x0010, + 0x291: 0x0034, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0034, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0034, 0x29b: 0x0034, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, 0x2a0: 0x0024, 0x2a1: 0x0024, 0x2a2: 0x0034, 0x2a3: 0x0034, + 0x2a4: 0x0034, 0x2a5: 0x0034, 0x2a6: 0x0034, 0x2a7: 0x0034, 0x2a8: 0x0024, 0x2a9: 0x0024, + 0x2aa: 0x0034, 0x2ab: 0x0024, 0x2ac: 0x0024, 0x2ad: 0x0034, 0x2ae: 0x0034, 0x2af: 0x0024, + 0x2b0: 0x0034, 0x2b1: 0x0034, 0x2b2: 0x0034, 0x2b3: 0x0034, 0x2b4: 0x0034, 0x2b5: 0x0034, + 0x2b6: 0x0034, 0x2b7: 0x0034, 0x2b8: 0x0034, 0x2b9: 0x0034, 0x2ba: 0x0034, 0x2bb: 0x0034, + 0x2bc: 0x0034, 0x2bd: 0x0034, 0x2bf: 0x0034, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x7053, 0x2c1: 0x7053, 0x2c2: 0x7053, 0x2c3: 0x7053, 0x2c4: 0x7053, 0x2c5: 0x7053, + 0x2c7: 0x7053, + 0x2cd: 0x7053, 0x2d0: 0x1aea, 0x2d1: 0x1b6a, + 0x2d2: 0x1bea, 0x2d3: 0x1c6a, 0x2d4: 0x1cea, 0x2d5: 0x1d6a, 0x2d6: 0x1dea, 0x2d7: 
0x1e6a, + 0x2d8: 0x1eea, 0x2d9: 0x1f6a, 0x2da: 0x1fea, 0x2db: 0x206a, 0x2dc: 0x20ea, 0x2dd: 0x216a, + 0x2de: 0x21ea, 0x2df: 0x226a, 0x2e0: 0x22ea, 0x2e1: 0x236a, 0x2e2: 0x23ea, 0x2e3: 0x246a, + 0x2e4: 0x24ea, 0x2e5: 0x256a, 0x2e6: 0x25ea, 0x2e7: 0x266a, 0x2e8: 0x26ea, 0x2e9: 0x276a, + 0x2ea: 0x27ea, 0x2eb: 0x286a, 0x2ec: 0x28ea, 0x2ed: 0x296a, 0x2ee: 0x29ea, 0x2ef: 0x2a6a, + 0x2f0: 0x2aea, 0x2f1: 0x2b6a, 0x2f2: 0x2bea, 0x2f3: 0x2c6a, 0x2f4: 0x2cea, 0x2f5: 0x2d6a, + 0x2f6: 0x2dea, 0x2f7: 0x2e6a, 0x2f8: 0x2eea, 0x2f9: 0x2f6a, 0x2fa: 0x2fea, + 0x2fc: 0x0014, 0x2fd: 0x306a, 0x2fe: 0x30ea, 0x2ff: 0x316a, + // Block 0xc, offset 0x300 + 0x300: 0x0812, 0x301: 0x0812, 0x302: 0x0812, 0x303: 0x0812, 0x304: 0x0812, 0x305: 0x0812, + 0x308: 0x0813, 0x309: 0x0813, 0x30a: 0x0813, 0x30b: 0x0813, + 0x30c: 0x0813, 0x30d: 0x0813, 0x310: 0x3b1a, 0x311: 0x0812, + 0x312: 0x3bfa, 0x313: 0x0812, 0x314: 0x3d3a, 0x315: 0x0812, 0x316: 0x3e7a, 0x317: 0x0812, + 0x319: 0x0813, 0x31b: 0x0813, 0x31d: 0x0813, + 0x31f: 0x0813, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x0812, 0x323: 0x0812, + 0x324: 0x0812, 0x325: 0x0812, 0x326: 0x0812, 0x327: 0x0812, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x0813, 0x32b: 0x0813, 0x32c: 0x0813, 0x32d: 0x0813, 0x32e: 0x0813, 0x32f: 0x0813, + 0x330: 0x9252, 0x331: 0x9252, 0x332: 0x9552, 0x333: 0x9552, 0x334: 0x9852, 0x335: 0x9852, + 0x336: 0x9b52, 0x337: 0x9b52, 0x338: 0x9e52, 0x339: 0x9e52, 0x33a: 0xa152, 0x33b: 0xa152, + 0x33c: 0x4d52, 0x33d: 0x4d52, + // Block 0xd, offset 0x340 + 0x340: 0x3fba, 0x341: 0x40aa, 0x342: 0x419a, 0x343: 0x428a, 0x344: 0x437a, 0x345: 0x446a, + 0x346: 0x455a, 0x347: 0x464a, 0x348: 0x4739, 0x349: 0x4829, 0x34a: 0x4919, 0x34b: 0x4a09, + 0x34c: 0x4af9, 0x34d: 0x4be9, 0x34e: 0x4cd9, 0x34f: 0x4dc9, 0x350: 0x4eba, 0x351: 0x4faa, + 0x352: 0x509a, 0x353: 0x518a, 0x354: 0x527a, 0x355: 0x536a, 0x356: 0x545a, 0x357: 0x554a, + 0x358: 0x5639, 0x359: 0x5729, 0x35a: 0x5819, 0x35b: 0x5909, 0x35c: 0x59f9, 0x35d: 0x5ae9, + 0x35e: 0x5bd9, 0x35f: 0x5cc9, 0x360: 0x5dba, 0x361: 0x5eaa, 0x362: 0x5f9a, 0x363: 0x608a, + 0x364: 0x617a, 0x365: 0x626a, 0x366: 0x635a, 0x367: 0x644a, 0x368: 0x6539, 0x369: 0x6629, + 0x36a: 0x6719, 0x36b: 0x6809, 0x36c: 0x68f9, 0x36d: 0x69e9, 0x36e: 0x6ad9, 0x36f: 0x6bc9, + 0x370: 0x0812, 0x371: 0x0812, 0x372: 0x6cba, 0x373: 0x6dca, 0x374: 0x6e9a, + 0x376: 0x6f7a, 0x377: 0x705a, 0x378: 0x0813, 0x379: 0x0813, 0x37a: 0x9253, 0x37b: 0x9253, + 0x37c: 0x7199, 0x37d: 0x0004, 0x37e: 0x726a, 0x37f: 0x0004, + // Block 0xe, offset 0x380 + 0x380: 0x0004, 0x381: 0x0004, 0x382: 0x72ea, 0x383: 0x73fa, 0x384: 0x74ca, + 0x386: 0x75aa, 0x387: 0x768a, 0x388: 0x9553, 0x389: 0x9553, 0x38a: 0x9853, 0x38b: 0x9853, + 0x38c: 0x77c9, 0x38d: 0x0004, 0x38e: 0x0004, 0x38f: 0x0004, 0x390: 0x0812, 0x391: 0x0812, + 0x392: 0x789a, 0x393: 0x79da, 0x396: 0x7b1a, 0x397: 0x7bfa, + 0x398: 0x0813, 0x399: 0x0813, 0x39a: 0x9b53, 0x39b: 0x9b53, 0x39d: 0x0004, + 0x39e: 0x0004, 0x39f: 0x0004, 0x3a0: 0x0812, 0x3a1: 0x0812, 0x3a2: 0x7d3a, 0x3a3: 0x7e7a, + 0x3a4: 0x7fba, 0x3a5: 0x0912, 0x3a6: 0x809a, 0x3a7: 0x817a, 0x3a8: 0x0813, 0x3a9: 0x0813, + 0x3aa: 0xa153, 0x3ab: 0xa153, 0x3ac: 0x0913, 0x3ad: 0x0004, 0x3ae: 0x0004, 0x3af: 0x0004, + 0x3b2: 0x82ba, 0x3b3: 0x83ca, 0x3b4: 0x849a, + 0x3b6: 0x857a, 0x3b7: 0x865a, 0x3b8: 0x9e53, 0x3b9: 0x9e53, 0x3ba: 0x4d53, 0x3bb: 0x4d53, + 0x3bc: 0x8799, 0x3bd: 0x0004, 0x3be: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3c2: 0x0013, + 0x3c7: 0x0013, 0x3ca: 0x0012, 0x3cb: 0x0013, + 0x3cc: 0x0013, 0x3cd: 0x0013, 0x3ce: 0x0012, 0x3cf: 0x0012, 0x3d0: 0x0013, 0x3d1: 0x0013, + 0x3d2: 
0x0013, 0x3d3: 0x0012, 0x3d5: 0x0013, + 0x3d9: 0x0013, 0x3da: 0x0013, 0x3db: 0x0013, 0x3dc: 0x0013, 0x3dd: 0x0013, + 0x3e4: 0x0013, 0x3e6: 0x886b, 0x3e8: 0x0013, + 0x3ea: 0x88cb, 0x3eb: 0x890b, 0x3ec: 0x0013, 0x3ed: 0x0013, 0x3ef: 0x0012, + 0x3f0: 0x0013, 0x3f1: 0x0013, 0x3f2: 0xa453, 0x3f3: 0x0013, 0x3f4: 0x0012, 0x3f5: 0x0010, + 0x3f6: 0x0010, 0x3f7: 0x0010, 0x3f8: 0x0010, 0x3f9: 0x0012, + 0x3fc: 0x0012, 0x3fd: 0x0012, 0x3fe: 0x0013, 0x3ff: 0x0013, + // Block 0x10, offset 0x400 + 0x400: 0x1a13, 0x401: 0x1a13, 0x402: 0x1e13, 0x403: 0x1e13, 0x404: 0x1a13, 0x405: 0x1a13, + 0x406: 0x2613, 0x407: 0x2613, 0x408: 0x2a13, 0x409: 0x2a13, 0x40a: 0x2e13, 0x40b: 0x2e13, + 0x40c: 0x2a13, 0x40d: 0x2a13, 0x40e: 0x2613, 0x40f: 0x2613, 0x410: 0xa752, 0x411: 0xa752, + 0x412: 0xaa52, 0x413: 0xaa52, 0x414: 0xad52, 0x415: 0xad52, 0x416: 0xaa52, 0x417: 0xaa52, + 0x418: 0xa752, 0x419: 0xa752, 0x41a: 0x1a12, 0x41b: 0x1a12, 0x41c: 0x1e12, 0x41d: 0x1e12, + 0x41e: 0x1a12, 0x41f: 0x1a12, 0x420: 0x2612, 0x421: 0x2612, 0x422: 0x2a12, 0x423: 0x2a12, + 0x424: 0x2e12, 0x425: 0x2e12, 0x426: 0x2a12, 0x427: 0x2a12, 0x428: 0x2612, 0x429: 0x2612, + // Block 0x11, offset 0x440 + 0x440: 0x6552, 0x441: 0x6552, 0x442: 0x6552, 0x443: 0x6552, 0x444: 0x6552, 0x445: 0x6552, + 0x446: 0x6552, 0x447: 0x6552, 0x448: 0x6552, 0x449: 0x6552, 0x44a: 0x6552, 0x44b: 0x6552, + 0x44c: 0x6552, 0x44d: 0x6552, 0x44e: 0x6552, 0x44f: 0x6552, 0x450: 0xb052, 0x451: 0xb052, + 0x452: 0xb052, 0x453: 0xb052, 0x454: 0xb052, 0x455: 0xb052, 0x456: 0xb052, 0x457: 0xb052, + 0x458: 0xb052, 0x459: 0xb052, 0x45a: 0xb052, 0x45b: 0xb052, 0x45c: 0xb052, 0x45d: 0xb052, + 0x45e: 0xb052, 0x460: 0x0113, 0x461: 0x0112, 0x462: 0x896b, 0x463: 0x8b53, + 0x464: 0x89cb, 0x465: 0x8a2a, 0x466: 0x8a8a, 0x467: 0x0f13, 0x468: 0x0f12, 0x469: 0x0313, + 0x46a: 0x0312, 0x46b: 0x0713, 0x46c: 0x0712, 0x46d: 0x8aeb, 0x46e: 0x8b4b, 0x46f: 0x8bab, + 0x470: 0x8c0b, 0x471: 0x0012, 0x472: 0x0113, 0x473: 0x0112, 0x474: 0x0012, 0x475: 0x0313, + 0x476: 0x0312, 0x477: 0x0012, 0x478: 0x0012, 0x479: 0x0012, 0x47a: 0x0012, 0x47b: 0x0012, + 0x47c: 0x0015, 0x47d: 0x0015, 0x47e: 0x8c6b, 0x47f: 0x8ccb, + // Block 0x12, offset 0x480 + 0x480: 0x0113, 0x481: 0x0112, 0x482: 0x0113, 0x483: 0x0112, 0x484: 0x0113, 0x485: 0x0112, + 0x486: 0x0113, 0x487: 0x0112, 0x488: 0x0014, 0x489: 0x0014, 0x48a: 0x0014, 0x48b: 0x0713, + 0x48c: 0x0712, 0x48d: 0x8d2b, 0x48e: 0x0012, 0x48f: 0x0010, 0x490: 0x0113, 0x491: 0x0112, + 0x492: 0x0113, 0x493: 0x0112, 0x494: 0x6552, 0x495: 0x0012, 0x496: 0x0113, 0x497: 0x0112, + 0x498: 0x0113, 0x499: 0x0112, 0x49a: 0x0113, 0x49b: 0x0112, 0x49c: 0x0113, 0x49d: 0x0112, + 0x49e: 0x0113, 0x49f: 0x0112, 0x4a0: 0x0113, 0x4a1: 0x0112, 0x4a2: 0x0113, 0x4a3: 0x0112, + 0x4a4: 0x0113, 0x4a5: 0x0112, 0x4a6: 0x0113, 0x4a7: 0x0112, 0x4a8: 0x0113, 0x4a9: 0x0112, + 0x4aa: 0x8d8b, 0x4ab: 0x8deb, 0x4ac: 0x8e4b, 0x4ad: 0x8eab, 0x4ae: 0x8f0b, 0x4af: 0x0012, + 0x4b0: 0x8f6b, 0x4b1: 0x8fcb, 0x4b2: 0x902b, 0x4b3: 0xb353, 0x4b4: 0x0113, 0x4b5: 0x0112, + 0x4b6: 0x0113, 0x4b7: 0x0112, 0x4b8: 0x0113, 0x4b9: 0x0112, 0x4ba: 0x0113, 0x4bb: 0x0112, + 0x4bc: 0x0113, 0x4bd: 0x0112, 0x4be: 0x0113, 0x4bf: 0x0112, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x90ea, 0x4c1: 0x916a, 0x4c2: 0x91ea, 0x4c3: 0x926a, 0x4c4: 0x931a, 0x4c5: 0x93ca, + 0x4c6: 0x944a, + 0x4d3: 0x94ca, 0x4d4: 0x95aa, 0x4d5: 0x968a, 0x4d6: 0x976a, 0x4d7: 0x984a, + 0x4dd: 0x0010, + 0x4de: 0x0034, 0x4df: 0x0010, 0x4e0: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, 0x4e3: 0x0010, + 0x4e4: 0x0010, 0x4e5: 0x0010, 0x4e6: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, + 0x4ea: 
0x0010, 0x4eb: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f3: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f8: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, + // Block 0x14, offset 0x500 + 0x500: 0x2213, 0x501: 0x2213, 0x502: 0x2613, 0x503: 0x2613, 0x504: 0x2213, 0x505: 0x2213, + 0x506: 0x2e13, 0x507: 0x2e13, 0x508: 0x2213, 0x509: 0x2213, 0x50a: 0x2613, 0x50b: 0x2613, + 0x50c: 0x2213, 0x50d: 0x2213, 0x50e: 0x3e13, 0x50f: 0x3e13, 0x510: 0x2213, 0x511: 0x2213, + 0x512: 0x2613, 0x513: 0x2613, 0x514: 0x2213, 0x515: 0x2213, 0x516: 0x2e13, 0x517: 0x2e13, + 0x518: 0x2213, 0x519: 0x2213, 0x51a: 0x2613, 0x51b: 0x2613, 0x51c: 0x2213, 0x51d: 0x2213, + 0x51e: 0xbc53, 0x51f: 0xbc53, 0x520: 0xbf53, 0x521: 0xbf53, 0x522: 0x2212, 0x523: 0x2212, + 0x524: 0x2612, 0x525: 0x2612, 0x526: 0x2212, 0x527: 0x2212, 0x528: 0x2e12, 0x529: 0x2e12, + 0x52a: 0x2212, 0x52b: 0x2212, 0x52c: 0x2612, 0x52d: 0x2612, 0x52e: 0x2212, 0x52f: 0x2212, + 0x530: 0x3e12, 0x531: 0x3e12, 0x532: 0x2212, 0x533: 0x2212, 0x534: 0x2612, 0x535: 0x2612, + 0x536: 0x2212, 0x537: 0x2212, 0x538: 0x2e12, 0x539: 0x2e12, 0x53a: 0x2212, 0x53b: 0x2212, + 0x53c: 0x2612, 0x53d: 0x2612, 0x53e: 0x2212, 0x53f: 0x2212, + // Block 0x15, offset 0x540 + 0x542: 0x0010, + 0x547: 0x0010, 0x549: 0x0010, 0x54b: 0x0010, + 0x54d: 0x0010, 0x54e: 0x0010, 0x54f: 0x0010, 0x551: 0x0010, + 0x552: 0x0010, 0x554: 0x0010, 0x557: 0x0010, + 0x559: 0x0010, 0x55b: 0x0010, 0x55d: 0x0010, + 0x55f: 0x0010, 0x561: 0x0010, 0x562: 0x0010, + 0x564: 0x0010, 0x567: 0x0010, 0x568: 0x0010, 0x569: 0x0010, + 0x56a: 0x0010, 0x56c: 0x0010, 0x56d: 0x0010, 0x56e: 0x0010, 0x56f: 0x0010, + 0x570: 0x0010, 0x571: 0x0010, 0x572: 0x0010, 0x574: 0x0010, 0x575: 0x0010, + 0x576: 0x0010, 0x577: 0x0010, 0x579: 0x0010, 0x57a: 0x0010, 0x57b: 0x0010, + 0x57c: 0x0010, 0x57e: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x14, 0xc3: 0x15, 0xc4: 0x16, 0xc5: 0x17, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x18, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x19, 0xcc: 0x1a, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x1b, 0xd1: 0x1c, 0xd2: 0x1d, 0xd3: 0x1e, 0xd4: 0x1f, 0xd5: 0x20, 0xd6: 0x08, 0xd7: 0x21, + 0xd8: 0x22, 0xd9: 0x23, 0xda: 0x24, 0xdb: 0x25, 0xdc: 0x26, 0xdd: 0x27, 0xde: 0x28, 0xdf: 0x29, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x2a, 0x121: 0x2b, 0x122: 0x2c, 0x123: 0x2d, 0x124: 0x2e, 0x125: 0x2f, 0x126: 0x30, 0x127: 0x31, + 0x128: 0x32, 0x129: 0x33, 0x12a: 0x34, 0x12b: 0x35, 0x12c: 0x36, 0x12d: 0x37, 0x12e: 0x38, 0x12f: 0x39, + 0x130: 0x3a, 0x131: 0x3b, 0x132: 0x3c, 0x133: 0x3d, 0x134: 0x3e, 0x135: 0x3f, 0x136: 0x40, 0x137: 0x41, + 0x138: 0x42, 0x139: 0x43, 0x13a: 0x44, 0x13b: 0x45, 0x13c: 0x46, 0x13d: 0x47, 0x13e: 0x48, 0x13f: 0x49, + // Block 0x5, offset 0x140 + 0x140: 0x4a, 0x141: 0x4b, 0x142: 0x4c, 0x143: 0x09, 0x144: 0x24, 0x145: 0x24, 0x146: 0x24, 0x147: 0x24, + 0x148: 0x24, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x24, 0x152: 0x24, 0x153: 0x24, 0x154: 0x24, 0x155: 0x24, 0x156: 0x24, 0x157: 0x24, + 0x158: 0x24, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16b: 0x66, 0x16c: 0x67, 0x16d: 0x68, 0x16e: 0x69, 0x16f: 0x6a, + 0x170: 0x6b, 0x171: 0x6c, 0x172: 0x6d, 0x173: 0x6e, 0x174: 0x6f, 0x175: 0x70, 0x176: 0x71, 0x177: 0x72, + 0x178: 0x73, 0x179: 0x73, 0x17a: 0x74, 0x17b: 0x73, 0x17c: 0x75, 0x17d: 0x0a, 0x17e: 0x0b, 0x17f: 0x0c, + // Block 0x6, offset 0x180 + 0x180: 0x76, 0x181: 0x77, 0x182: 0x78, 0x183: 0x79, 0x184: 0x0d, 0x185: 0x7a, 0x186: 0x7b, + 0x192: 0x7c, 0x193: 0x0e, + 0x1b0: 0x7d, 0x1b1: 0x0f, 0x1b2: 0x73, 0x1b3: 0x7e, 0x1b4: 0x7f, 0x1b5: 0x80, 0x1b6: 0x81, 0x1b7: 0x82, + 0x1b8: 0x83, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x84, 0x1c2: 0x85, 0x1c3: 0x86, 0x1c4: 0x87, 0x1c5: 0x24, 0x1c6: 0x88, + // Block 0x8, offset 0x200 + 0x200: 0x89, 0x201: 0x24, 0x202: 0x24, 0x203: 0x24, 0x204: 0x24, 0x205: 0x24, 0x206: 0x24, 0x207: 0x24, + 0x208: 0x24, 0x209: 0x24, 0x20a: 0x24, 0x20b: 0x24, 0x20c: 0x24, 0x20d: 0x24, 0x20e: 0x24, 0x20f: 0x24, + 0x210: 0x24, 0x211: 0x24, 0x212: 0x8a, 0x213: 0x8b, 0x214: 0x24, 0x215: 0x24, 0x216: 0x24, 0x217: 0x24, + 0x218: 0x8c, 0x219: 0x8d, 0x21a: 0x8e, 0x21b: 0x8f, 0x21c: 0x90, 0x21d: 0x91, 0x21e: 0x10, 0x21f: 0x92, + 0x220: 0x93, 0x221: 0x94, 0x222: 0x24, 0x223: 0x95, 0x224: 0x96, 0x225: 0x97, 0x226: 0x98, 0x227: 0x99, + 0x228: 0x9a, 0x229: 0x9b, 0x22a: 0x9c, 0x22b: 0x9d, 0x22c: 0x9e, 0x22d: 0x9f, 0x22e: 0xa0, 0x22f: 0xa1, + 0x230: 0x24, 0x231: 0x24, 0x232: 0x24, 0x233: 0x24, 0x234: 0x24, 0x235: 0x24, 0x236: 0x24, 0x237: 0x24, + 0x238: 0x24, 0x239: 0x24, 0x23a: 0x24, 0x23b: 0x24, 0x23c: 0x24, 0x23d: 0x24, 0x23e: 0x24, 0x23f: 0x24, + // Block 0x9, offset 0x240 + 0x240: 0x24, 0x241: 0x24, 0x242: 0x24, 0x243: 0x24, 0x244: 0x24, 0x245: 0x24, 0x246: 0x24, 0x247: 0x24, + 0x248: 0x24, 0x249: 0x24, 0x24a: 0x24, 0x24b: 0x24, 0x24c: 0x24, 0x24d: 0x24, 0x24e: 0x24, 0x24f: 0x24, + 0x250: 0x24, 0x251: 0x24, 0x252: 0x24, 0x253: 0x24, 0x254: 0x24, 0x255: 0x24, 0x256: 
0x24, 0x257: 0x24, + 0x258: 0x24, 0x259: 0x24, 0x25a: 0x24, 0x25b: 0x24, 0x25c: 0x24, 0x25d: 0x24, 0x25e: 0x24, 0x25f: 0x24, + 0x260: 0x24, 0x261: 0x24, 0x262: 0x24, 0x263: 0x24, 0x264: 0x24, 0x265: 0x24, 0x266: 0x24, 0x267: 0x24, + 0x268: 0x24, 0x269: 0x24, 0x26a: 0x24, 0x26b: 0x24, 0x26c: 0x24, 0x26d: 0x24, 0x26e: 0x24, 0x26f: 0x24, + 0x270: 0x24, 0x271: 0x24, 0x272: 0x24, 0x273: 0x24, 0x274: 0x24, 0x275: 0x24, 0x276: 0x24, 0x277: 0x24, + 0x278: 0x24, 0x279: 0x24, 0x27a: 0x24, 0x27b: 0x24, 0x27c: 0x24, 0x27d: 0x24, 0x27e: 0x24, 0x27f: 0x24, + // Block 0xa, offset 0x280 + 0x280: 0x24, 0x281: 0x24, 0x282: 0x24, 0x283: 0x24, 0x284: 0x24, 0x285: 0x24, 0x286: 0x24, 0x287: 0x24, + 0x288: 0x24, 0x289: 0x24, 0x28a: 0x24, 0x28b: 0x24, 0x28c: 0x24, 0x28d: 0x24, 0x28e: 0x24, 0x28f: 0x24, + 0x290: 0x24, 0x291: 0x24, 0x292: 0x24, 0x293: 0x24, 0x294: 0x24, 0x295: 0x24, 0x296: 0x24, 0x297: 0x24, + 0x298: 0x24, 0x299: 0x24, 0x29a: 0x24, 0x29b: 0x24, 0x29c: 0x24, 0x29d: 0x24, 0x29e: 0xa2, 0x29f: 0xa3, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x11, 0x2ed: 0xa4, 0x2ee: 0xa5, 0x2ef: 0xa6, + 0x2f0: 0x24, 0x2f1: 0x24, 0x2f2: 0x24, 0x2f3: 0x24, 0x2f4: 0xa7, 0x2f5: 0xa8, 0x2f6: 0xa9, 0x2f7: 0xaa, + 0x2f8: 0xab, 0x2f9: 0xac, 0x2fa: 0x24, 0x2fb: 0xad, 0x2fc: 0xae, 0x2fd: 0xaf, 0x2fe: 0xb0, 0x2ff: 0xb1, + // Block 0xc, offset 0x300 + 0x300: 0xb2, 0x301: 0xb3, 0x302: 0x24, 0x303: 0xb4, 0x305: 0xb5, 0x307: 0xb6, + 0x30a: 0xb7, 0x30b: 0xb8, 0x30c: 0xb9, 0x30d: 0xba, 0x30e: 0xbb, 0x30f: 0xbc, + 0x310: 0xbd, 0x311: 0xbe, 0x312: 0xbf, 0x313: 0xc0, 0x314: 0xc1, 0x315: 0xc2, + 0x318: 0x24, 0x319: 0x24, 0x31a: 0x24, 0x31b: 0x24, 0x31c: 0xc3, 0x31d: 0xc4, + 0x320: 0xc5, 0x321: 0xc6, 0x322: 0xc7, 0x323: 0xc8, 0x324: 0xc9, 0x326: 0xca, + 0x328: 0xcb, 0x329: 0xcc, 0x32a: 0xcd, 0x32b: 0xce, 0x32c: 0x5f, 0x32d: 0xcf, 0x32e: 0xd0, + 0x330: 0x24, 0x331: 0xd1, 0x332: 0xd2, 0x333: 0xd3, 0x334: 0xd4, + 0x33a: 0xd5, 0x33c: 0xd6, 0x33d: 0xd7, 0x33e: 0xd8, 0x33f: 0xd9, + // Block 0xd, offset 0x340 + 0x340: 0xda, 0x341: 0xdb, 0x342: 0xdc, 0x343: 0xdd, 0x344: 0xde, 0x345: 0xdf, 0x346: 0xe0, 0x347: 0xe1, + 0x348: 0xe2, 0x34a: 0xe3, 0x34b: 0xe4, 0x34c: 0xe5, 0x34d: 0xe6, + 0x350: 0xe7, 0x351: 0xe8, 0x352: 0xe9, 0x353: 0xea, 0x356: 0xeb, 0x357: 0xec, + 0x358: 0xed, 0x359: 0xee, 0x35a: 0xef, 0x35b: 0xf0, 0x35c: 0xf1, + 0x360: 0xf2, 0x362: 0xf3, 0x363: 0xf4, 0x364: 0xf5, 0x365: 0xf6, 0x366: 0xf7, 0x367: 0xf8, + 0x368: 0xf9, 0x369: 0xfa, 0x36a: 0xfb, 0x36b: 0xfc, + 0x370: 0xfd, 0x371: 0xfe, 0x372: 0xff, 0x374: 0x100, 0x375: 0x101, 0x376: 0x102, + 0x37b: 0x103, 0x37e: 0x104, + // Block 0xe, offset 0x380 + 0x380: 0x24, 0x381: 0x24, 0x382: 0x24, 0x383: 0x24, 0x384: 0x24, 0x385: 0x24, 0x386: 0x24, 0x387: 0x24, + 0x388: 0x24, 0x389: 0x24, 0x38a: 0x24, 0x38b: 0x24, 0x38c: 0x24, 0x38d: 0x24, 0x38e: 0x105, + 0x390: 0x24, 0x391: 0x106, 0x392: 0x24, 0x393: 0x24, 0x394: 0x24, 0x395: 0x107, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x24, 0x3c1: 0x24, 0x3c2: 0x24, 0x3c3: 0x24, 0x3c4: 0x24, 0x3c5: 0x24, 0x3c6: 0x24, 0x3c7: 0x24, + 0x3c8: 0x24, 0x3c9: 0x24, 0x3ca: 0x24, 0x3cb: 0x24, 0x3cc: 0x24, 0x3cd: 0x24, 0x3ce: 0x24, 0x3cf: 0x24, + 0x3d0: 0x108, + // Block 0x10, offset 0x400 + 0x410: 0x24, 0x411: 0x24, 0x412: 0x24, 0x413: 0x24, 0x414: 0x24, 0x415: 0x24, 0x416: 0x24, 0x417: 0x24, + 0x418: 0x24, 0x419: 0x109, + // Block 0x11, offset 0x440 + 0x460: 0x24, 0x461: 0x24, 0x462: 0x24, 0x463: 0x24, 0x464: 0x24, 0x465: 0x24, 0x466: 0x24, 0x467: 0x24, + 0x468: 0xfc, 0x469: 0x10a, 0x46b: 0x10b, 0x46c: 0x10c, 0x46d: 0x10d, 0x46e: 0x10e, + 0x479: 0x10f, 0x47c: 0x24, 0x47d: 
0x110, 0x47e: 0x111, 0x47f: 0x112, + // Block 0x12, offset 0x480 + 0x4b0: 0x24, 0x4b1: 0x113, 0x4b2: 0x114, + // Block 0x13, offset 0x4c0 + 0x4c5: 0x115, 0x4c6: 0x116, + 0x4c9: 0x117, + 0x4d0: 0x118, 0x4d1: 0x119, 0x4d2: 0x11a, 0x4d3: 0x11b, 0x4d4: 0x11c, 0x4d5: 0x11d, 0x4d6: 0x11e, 0x4d7: 0x11f, + 0x4d8: 0x120, 0x4d9: 0x121, 0x4da: 0x122, 0x4db: 0x123, 0x4dc: 0x124, 0x4dd: 0x125, 0x4de: 0x126, 0x4df: 0x127, + 0x4e8: 0x128, 0x4e9: 0x129, 0x4ea: 0x12a, + // Block 0x14, offset 0x500 + 0x500: 0x12b, 0x504: 0x12c, 0x505: 0x12d, + 0x50b: 0x12e, + 0x520: 0x24, 0x521: 0x24, 0x522: 0x24, 0x523: 0x12f, 0x524: 0x12, 0x525: 0x130, + 0x538: 0x131, 0x539: 0x13, 0x53a: 0x132, + // Block 0x15, offset 0x540 + 0x544: 0x133, 0x545: 0x134, 0x546: 0x135, + 0x54f: 0x136, + 0x56f: 0x137, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x138, 0x5c1: 0x139, 0x5c4: 0x139, 0x5c5: 0x139, 0x5c6: 0x139, 0x5c7: 0x13a, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 296 entries, 592 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x34, 0x37, 0x3b, 0x3e, 0x42, 0x4c, 0x4e, 0x57, 0x5e, 0x63, 0x71, 0x72, 0x80, 0x8f, 0x99, 0x9c, 0xa3, 0xab, 0xae, 0xb0, 0xc0, 0xc6, 0xd4, 0xdf, 0xec, 0xf7, 0x103, 0x10d, 0x119, 0x124, 0x130, 0x13c, 0x144, 0x14d, 0x157, 0x162, 0x16e, 0x174, 0x17f, 0x185, 0x18d, 0x190, 0x195, 0x199, 0x19d, 0x1a4, 0x1ad, 0x1b5, 0x1b6, 0x1bf, 0x1c6, 0x1ce, 0x1d4, 0x1d9, 0x1dd, 0x1e0, 0x1e2, 0x1e5, 0x1ea, 0x1eb, 0x1ed, 0x1ef, 0x1f1, 0x1f8, 0x1fd, 0x201, 0x20a, 0x20d, 0x210, 0x216, 0x217, 0x222, 0x223, 0x224, 0x229, 0x236, 0x23f, 0x240, 0x248, 0x251, 0x25a, 0x263, 0x268, 0x26b, 0x276, 0x284, 0x286, 0x28d, 0x291, 0x29d, 0x29e, 0x2a9, 0x2b1, 0x2b9, 0x2bf, 0x2c0, 0x2ce, 0x2d3, 0x2d6, 0x2db, 0x2df, 0x2e5, 0x2ea, 0x2ed, 0x2f2, 0x2f7, 0x2f8, 0x2fe, 0x300, 0x301, 0x303, 0x305, 0x308, 0x309, 0x30b, 0x30e, 0x314, 0x318, 0x31a, 0x31f, 0x326, 0x331, 0x33b, 0x33c, 0x345, 0x349, 0x34e, 0x356, 0x35c, 0x362, 0x36c, 0x371, 0x37a, 0x380, 0x389, 0x38d, 0x395, 0x397, 0x399, 0x39c, 0x39e, 0x3a0, 0x3a1, 0x3a2, 0x3a4, 0x3a6, 0x3ac, 0x3b1, 0x3b3, 0x3ba, 0x3bd, 0x3bf, 0x3c5, 0x3ca, 0x3cc, 0x3cd, 0x3ce, 0x3cf, 0x3d1, 0x3d3, 0x3d5, 0x3d8, 0x3da, 0x3dd, 0x3e5, 0x3e8, 0x3ec, 0x3f4, 0x3f6, 0x3f7, 0x3f8, 0x3fa, 0x400, 0x402, 0x403, 0x405, 0x407, 0x409, 0x416, 0x417, 0x418, 0x41c, 0x41e, 0x41f, 0x420, 0x421, 0x422, 0x425, 0x428, 0x42b, 0x431, 0x432, 0x434, 0x438, 0x43c, 0x442, 0x445, 0x44c, 0x450, 0x454, 0x45d, 0x466, 0x46c, 0x472, 0x47c, 0x486, 0x488, 0x491, 0x497, 0x49d, 0x4a3, 0x4a6, 0x4ac, 0x4af, 0x4b8, 0x4b9, 0x4c0, 0x4c4, 0x4c5, 0x4c8, 0x4d2, 0x4d5, 0x4d7, 0x4de, 0x4e6, 0x4ec, 0x4f2, 0x4f3, 0x4f9, 0x4fc, 0x504, 0x50b, 0x515, 0x51d, 0x520, 0x521, 0x522, 0x523, 0x524, 0x526, 0x527, 0x529, 0x52b, 0x52d, 0x531, 0x532, 0x534, 0x537, 0x539, 0x53c, 0x53e, 0x543, 0x548, 0x54c, 0x54d, 0x550, 0x554, 0x55f, 0x563, 0x56b, 0x570, 0x574, 0x577, 0x57b, 0x57e, 0x581, 0x586, 0x58a, 0x58e, 0x592, 0x596, 0x598, 0x59a, 0x59d, 0x5a2, 0x5a5, 0x5a7, 0x5aa, 0x5ac, 0x5b2, 0x5bb, 0x5c0, 0x5c1, 0x5c4, 0x5c5, 0x5c6, 0x5c7, 0x5c9, 0x5ca, 0x5cb} + +// sparseValues: 1483 entries, 5932 bytes +var sparseValues = [1483]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 
0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x37 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3b + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3e + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x42 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4c + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4e + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0054, lo: 0x9f, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa0}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x57 + {value: 0x0034, lo: 
0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xaf, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xe, offset 0x5e + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xf, offset 0x63 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x10, offset 0x71 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x11, offset 0x72 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x12, offset 0x80 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x14, offset 0x99 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x15, offset 0x9c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0x16, offset 0xa3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x17, offset 0xab + {value: 
0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + // Block 0x18, offset 0xae + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x19, offset 0xb0 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0034, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1a, offset 0xc0 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1b, offset 0xc6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1c, offset 0xd4 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1d, offset 0xdf + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xec + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x1f, offset 0xf7 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x20, offset 0x103 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x21, offset 0x10d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x22, offset 0x119 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x23, offset 0x124 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x24, offset 0x130 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x25, offset 0x13c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x26, offset 0x144 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x27, offset 0x14d + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, 
lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x28, offset 0x157 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x29, offset 0x162 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2a, offset 0x16e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2b, offset 0x174 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2c, offset 0x17f + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2d, offset 0x185 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2e, offset 0x18d + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x2f, offset 0x190 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x30, offset 0x195 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x31, offset 0x199 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x32, offset 0x19d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x33, offset 0x1a4 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 
0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x34, offset 0x1ad + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x35, offset 0x1b5 + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x36, offset 0x1b6 + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x37, offset 0x1bf + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x38, offset 0x1c6 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x39, offset 0x1ce + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d4 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3b, offset 0x1d9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3c, offset 0x1dd + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3d, offset 0x1e0 + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x3e, offset 0x1e2 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x3f, offset 0x1e5 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x40, offset 0x1ea + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x41, offset 0x1eb + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x42, offset 0x1ed + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x43, offset 0x1ef + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x44, offset 0x1f1 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 
0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x45, offset 0x1f8 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x46, offset 0x1fd + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x47, offset 0x201 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x48, offset 0x20a + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x49, offset 0x20d + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb8}, + // Block 0x4a, offset 0x210 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4b, offset 0x216 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4c, offset 0x217 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4d, offset 0x222 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x4e, offset 0x223 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x4f, offset 0x224 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x50, offset 0x229 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x51, offset 0x236 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x52, offset 0x23f + {value: 0x0034, lo: 0x80, hi: 0x80}, + // Block 0x53, offset 0x240 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 
0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x54, offset 0x248 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x55, offset 0x251 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x56, offset 0x25a + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x57, offset 0x263 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x58, offset 0x268 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x59, offset 0x26b + {value: 0x31ea, lo: 0x80, hi: 0x80}, + {value: 0x326a, lo: 0x81, hi: 0x81}, + {value: 0x32ea, lo: 0x82, hi: 0x82}, + {value: 0x336a, lo: 0x83, hi: 0x83}, + {value: 0x33ea, lo: 0x84, hi: 0x84}, + {value: 0x346a, lo: 0x85, hi: 0x85}, + {value: 0x34ea, lo: 0x86, hi: 0x86}, + {value: 0x356a, lo: 0x87, hi: 0x87}, + {value: 0x35ea, lo: 0x88, hi: 0x88}, + {value: 0x8353, lo: 0x90, hi: 0xba}, + {value: 0x8353, lo: 0xbd, hi: 0xbf}, + // Block 0x5a, offset 0x276 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xba}, + // Block 0x5b, offset 0x284 + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5c, offset 0x286 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8752, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8b52, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5d, offset 0x28d + {value: 0x0012, lo: 0x80, hi: 0x8d}, + {value: 0x8f52, lo: 0x8e, hi: 0x8e}, + {value: 0x0012, lo: 0x8f, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5e, offset 0x291 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, 
lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb9}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x5f, offset 0x29d + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x29e + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x369a, lo: 0x96, hi: 0x96}, + {value: 0x374a, lo: 0x97, hi: 0x97}, + {value: 0x37fa, lo: 0x98, hi: 0x98}, + {value: 0x38aa, lo: 0x99, hi: 0x99}, + {value: 0x395a, lo: 0x9a, hi: 0x9a}, + {value: 0x3a0a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x3abb, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x61, offset 0x2a9 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x62, offset 0x2b1 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x2b9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2bf + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x65, offset 0x2c0 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x66, offset 0x2ce + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0xa452, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x2d3 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x68, offset 0x2d6 + {value: 0xa753, lo: 0xb6, hi: 0xb7}, + {value: 0xaa53, lo: 0xb8, hi: 0xb9}, + {value: 0xad53, lo: 0xba, hi: 0xbb}, + {value: 0xaa53, lo: 0xbc, hi: 0xbd}, + {value: 0xa753, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x2db + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xb053, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6a, offset 0x2df + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6b, offset 0x2e5 + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 
0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6c, offset 0x2ea + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6d, offset 0x2ed + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6e, offset 0x2f2 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x6f, offset 0x2f7 + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x70, offset 0x2f8 + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x71, offset 0x2fe + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x72, offset 0x300 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x73, offset 0x301 + {value: 0x0010, lo: 0x85, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x74, offset 0x303 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x75, offset 0x305 + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x76, offset 0x308 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x77, offset 0x309 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x78, offset 0x30b + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x79, offset 0x30e + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7a, offset 0x314 + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7b, offset 0x318 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7c, offset 0x31a + {value: 0x0004, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7d, offset 0x31f + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8753, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7e, offset 0x326 + {value: 0x0117, lo: 0x82, hi: 0x83}, + {value: 0x6553, lo: 0x84, hi: 0x84}, + {value: 0x908b, lo: 0x85, hi: 0x85}, + {value: 0x8f53, lo: 0x86, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0316, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x7f, offset 0x331 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 
0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + // Block 0x80, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x81, offset 0x33c + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x82, offset 0x345 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x349 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x84, offset 0x34e + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x85, offset 0x356 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x86, offset 0x35c + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x87, offset 0x362 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x88, offset 0x36c + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x89, offset 0x371 + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8a, offset 0x37a + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8b, offset 0x380 + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xb352, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa8}, + 
{value: 0x0014, lo: 0xa9, hi: 0xa9}, + {value: 0x0004, lo: 0xaa, hi: 0xab}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x389 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x38d + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8e, offset 0x395 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x8f, offset 0x397 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x90, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x91, offset 0x39c + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x92, offset 0x39e + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x93, offset 0x3a0 + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x94, offset 0x3a1 + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x95, offset 0x3a2 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x96, offset 0x3a4 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x97, offset 0x3a6 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x98, offset 0x3ac + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x99, offset 0x3b1 + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x3b3 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3ba + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9c, offset 0x3bd + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9d, offset 0x3bf + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9e, offset 0x3c5 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9f, offset 0x3ca + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa0, offset 0x3cc + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa1, offset 0x3cd + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa2, offset 0x3ce + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa3, 
offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa4, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa5, offset 0x3d3 + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa6, offset 0x3d5 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa7, offset 0x3d8 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa8, offset 0x3da + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xa9, offset 0x3dd + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xb653, lo: 0x98, hi: 0x9f}, + {value: 0xb953, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xaa, offset 0x3e5 + {value: 0xb652, lo: 0x80, hi: 0x87}, + {value: 0xb952, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xab, offset 0x3e8 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb953, lo: 0xb0, hi: 0xb7}, + {value: 0xb653, lo: 0xb8, hi: 0xbf}, + // Block 0xac, offset 0x3ec + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb952, lo: 0x98, hi: 0x9f}, + {value: 0xb652, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xad, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xae, offset 0x3f6 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xaf, offset 0x3f7 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb0, offset 0x3f8 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb1, offset 0x3fa + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb2, offset 0x400 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb3, offset 0x402 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb4, offset 0x403 + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb5, offset 0x405 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb6, offset 0x407 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb7, offset 0x409 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb5}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x416 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xb9, offset 0x417 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xba, offset 0x418 + 
{value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbb, offset 0x41c + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbc, offset 0x41e + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbd, offset 0x41f + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbe, offset 0x420 + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xbf, offset 0x421 + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x422 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc1, offset 0x425 + {value: 0x0010, lo: 0x80, hi: 0xa9}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + // Block 0xc2, offset 0x428 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc3, offset 0x42b + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x87}, + {value: 0x0024, lo: 0x88, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x8b}, + {value: 0x0024, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + // Block 0xc4, offset 0x431 + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc5, offset 0x432 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xc6, offset 0x434 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc7, offset 0x438 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43c + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc9, offset 0x442 + {value: 0x0014, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xca, offset 0x445 + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xcb, offset 0x44c + {value: 0x0010, lo: 0x84, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xcc, offset 0x450 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xcd, offset 0x454 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x89, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xce, offset 0x45d + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 
0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xcf, offset 0x466 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xd0, offset 0x46c + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd1, offset 0x472 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xd2, offset 0x47c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xd3, offset 0x486 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xd4, offset 0x488 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + // Block 0xd5, offset 0x491 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x497 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd7, offset 0x49d + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd8, offset 0x4a3 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd9, offset 0x4a6 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xda, offset 0x4ac + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xdb, offset 0x4af + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 
0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + // Block 0xdc, offset 0x4b8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xdd, offset 0x4b9 + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xde, offset 0x4c0 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + // Block 0xdf, offset 0x4c4 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xe0, offset 0x4c5 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xe1, offset 0x4c8 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8c, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + {value: 0x0030, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xe2, offset 0x4d2 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0034, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xe3, offset 0x4d5 + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xe4, offset 0x4d7 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0014, lo: 0x94, hi: 0x97}, + {value: 0x0014, lo: 0x9a, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0x9f}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + // Block 0xe5, offset 0x4de + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xe6, offset 0x4e6 + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xe7, offset 0x4ec + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + // Block 0xe8, offset 0x4f2 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xe9, offset 0x4f3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xea, offset 0x4f9 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xeb, offset 0x4fc + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 
0xb5, hi: 0xb6}, + // Block 0xec, offset 0x504 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xed, offset 0x50b + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa5}, + {value: 0x0010, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xee, offset 0x515 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0x96}, + {value: 0x0034, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xef, offset 0x51d + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + // Block 0xf0, offset 0x520 + {value: 0x0010, lo: 0xb0, hi: 0xb0}, + // Block 0xf1, offset 0x521 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xf2, offset 0x522 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xf3, offset 0x523 + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xf4, offset 0x524 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xb0, hi: 0xb8}, + // Block 0xf5, offset 0x526 + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xf6, offset 0x527 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xf7, offset 0x529 + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xf8, offset 0x52b + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xf9, offset 0x52d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xfa, offset 0x531 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xfb, offset 0x532 + {value: 0x2013, lo: 0x80, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xbf}, + // Block 0xfc, offset 0x534 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xfd, offset 0x537 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xfe, offset 0x539 + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa3, hi: 0xa4}, + {value: 0x0030, lo: 0xb0, hi: 0xb1}, + // Block 0xff, offset 0x53c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0x100, offset 0x53e + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0x101, offset 0x543 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0x102, offset 0x548 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0x103, offset 0x54c + 
{value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0x104, offset 0x54d + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0x105, offset 0x550 + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x106, offset 0x554 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0x107, offset 0x55f + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x108, offset 0x563 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0x109, offset 0x56b + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0x10a, offset 0x570 + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0x10b, offset 0x574 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0x10c, offset 0x577 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0x10d, offset 0x57b + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10e, offset 0x57e + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x10f, offset 0x581 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x110, offset 0x586 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x111, offset 0x58a + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x112, offset 0x58e + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x113, offset 0x592 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x114, offset 0x596 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x115, offset 0x598 + {value: 0x0014, lo: 0x80, hi: 
0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x116, offset 0x59a + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x117, offset 0x59d + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x118, offset 0x5a2 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + // Block 0x119, offset 0x5a5 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + // Block 0x11a, offset 0x5a7 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0024, lo: 0xac, hi: 0xaf}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x11b, offset 0x5aa + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x11c, offset 0x5ac + {value: 0xbc52, lo: 0x80, hi: 0x81}, + {value: 0xbf52, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x11d, offset 0x5b2 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x11e, offset 0x5bb + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x11f, offset 0x5c0 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x120, offset 0x5c1 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x121, offset 0x5c4 + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x122, offset 0x5c5 + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x123, offset 0x5c6 + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x124, offset 0x5c7 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x125, offset 0x5c9 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x126, offset 0x5ca + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 15212 bytes (14KiB); checksum: 1EB13752 diff --git a/vendor/golang.org/x/text/cases/tables15.0.0.go b/vendor/golang.org/x/text/cases/tables15.0.0.go new file mode 100644 index 00000000..aee0f310 --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables15.0.0.go @@ -0,0 +1,2527 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "15.0.0" + +var xorData string = "" + // Size: 213 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" 
+ + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x001\x00\x00\x0b(\x04\x00\x03\x04\x1e\x00\x0b)\x08" + + "\x00\x03\x0a\x00\x02:\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<" + + "\x00\x01&\x00\x01*\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x03'" + + "\x00\x03)\x00\x03+\x00\x03/\x00\x03\x19\x00\x03\x1b\x00\x03\x1f\x00\x01" + + "\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2450 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꟅꟅ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ι" + + "ΙΙ\x166ΐΪ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12" + + "φΦΦ\x12\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x10\x1bᲐა" + + "\x10\x1bᲑბ\x10\x1bᲒგ\x10\x1bᲓდ\x10\x1bᲔე\x10\x1bᲕვ\x10\x1bᲖზ\x10\x1bᲗთ" + + "\x10\x1bᲘი\x10\x1bᲙკ\x10\x1bᲚლ\x10\x1bᲛმ\x10\x1bᲜნ\x10\x1bᲝო\x10\x1bᲞპ" + + "\x10\x1bᲟჟ\x10\x1bᲠრ\x10\x1bᲡს\x10\x1bᲢტ\x10\x1bᲣუ\x10\x1bᲤფ\x10\x1bᲥქ" + + "\x10\x1bᲦღ\x10\x1bᲧყ\x10\x1bᲨშ\x10\x1bᲩჩ\x10\x1bᲪც\x10\x1bᲫძ\x10\x1bᲬწ" + + "\x10\x1bᲭჭ\x10\x1bᲮხ\x10\x1bᲯჯ\x10\x1bᲰჰ\x10\x1bᲱჱ\x10\x1bᲲჲ\x10\x1bᲳჳ" + + "\x10\x1bᲴჴ\x10\x1bᲵჵ\x10\x1bᲶჶ\x10\x1bᲷჷ\x10\x1bᲸჸ\x10\x1bᲹჹ\x10\x1bᲺჺ" + + "\x10\x1bᲽჽ\x10\x1bᲾჾ\x10\x1bᲿჿ\x12\x12вВВ\x12\x12дДД\x12\x12оОО\x12\x12с" + + "СС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13\x1bꙋꙊꙊ\x13\x1bẖH̱H̱" + + "\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1baʾAʾAʾ\x13\x1bṡṠṠ\x12" + + "\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ" + + "\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ" + + "\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ\x15\x1dἄιᾄἌΙ\x15" + + "\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ\x15+ἢιἪΙᾚ\x15+ἣι" + + "ἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨΙ\x15\x1dἡιᾑἩΙ" + + "\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15\x1dἦιᾖἮΙ\x15" + + "\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ\x15+ὥιὭΙᾭ" + + "\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ\x15\x1dὣιᾣὫΙ" + + "\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰιᾺΙᾺͅ\x14#αιΑΙ" + + "ᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x12ιΙΙ\x15-ὴιῊΙ" + + "Ὴͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ\x166ῒΙ" + + "̈̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ̀\x166ΰΫ́Ϋ" + + "́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ" + + "\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12" + + "\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12" + + "\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12" + + "\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x10ʂʂ\x12\x12ffFFFf" + + "\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12st" + + "STSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄ" + + "խ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width 
in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 13398 bytes (13.08 KiB). Checksum: 544af6e6b1b70931. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 22: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 22 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 24 blocks, 1536 entries, 3072 bytes +// The third block is the zero block. +var caseValues = [1536]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 
0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x110a, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x118a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x120a, + 0x19e: 0x128a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x130d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 
0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x138a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x14ca, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x160a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x168a, 0x251: 0x170a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x178a, 0x256: 0x180a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x188a, 0x271: 0x190a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x198a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x6852, 0x281: 0x6852, 0x282: 0x6852, 0x283: 0x6852, 0x284: 0x6852, 0x285: 0x6852, + 0x286: 0x6852, 0x287: 0x1a0a, 0x288: 0x0012, 0x28a: 0x0010, + 0x291: 0x0034, + 0x292: 0x0024, 0x293: 0x0024, 0x294: 0x0024, 0x295: 0x0024, 0x296: 0x0034, 0x297: 0x0024, + 0x298: 0x0024, 0x299: 0x0024, 0x29a: 0x0034, 0x29b: 0x0034, 0x29c: 0x0024, 0x29d: 0x0024, + 0x29e: 0x0024, 0x29f: 0x0024, 0x2a0: 0x0024, 0x2a1: 0x0024, 0x2a2: 0x0034, 0x2a3: 0x0034, + 0x2a4: 0x0034, 0x2a5: 0x0034, 0x2a6: 0x0034, 0x2a7: 0x0034, 0x2a8: 0x0024, 0x2a9: 0x0024, + 0x2aa: 0x0034, 0x2ab: 0x0024, 0x2ac: 0x0024, 0x2ad: 0x0034, 0x2ae: 0x0034, 0x2af: 0x0024, + 0x2b0: 0x0034, 0x2b1: 0x0034, 0x2b2: 0x0034, 0x2b3: 0x0034, 0x2b4: 0x0034, 0x2b5: 0x0034, + 0x2b6: 0x0034, 0x2b7: 0x0034, 0x2b8: 0x0034, 0x2b9: 0x0034, 0x2ba: 0x0034, 0x2bb: 0x0034, + 0x2bc: 0x0034, 0x2bd: 0x0034, 0x2bf: 0x0034, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0010, 0x2c1: 0x0010, 0x2c2: 0x0010, 0x2c3: 0x0010, 0x2c4: 0x0010, 0x2c5: 0x0010, + 0x2c6: 0x0010, 0x2c7: 0x0010, 0x2c8: 0x0010, 0x2c9: 0x0014, 0x2ca: 0x0024, 0x2cb: 0x0024, + 0x2cc: 0x0024, 0x2cd: 0x0024, 0x2ce: 0x0024, 0x2cf: 
0x0034, 0x2d0: 0x0034, 0x2d1: 0x0034, + 0x2d2: 0x0034, 0x2d3: 0x0034, 0x2d4: 0x0024, 0x2d5: 0x0024, 0x2d6: 0x0024, 0x2d7: 0x0024, + 0x2d8: 0x0024, 0x2d9: 0x0024, 0x2da: 0x0024, 0x2db: 0x0024, 0x2dc: 0x0024, 0x2dd: 0x0024, + 0x2de: 0x0024, 0x2df: 0x0024, 0x2e0: 0x0024, 0x2e1: 0x0024, 0x2e2: 0x0014, 0x2e3: 0x0034, + 0x2e4: 0x0024, 0x2e5: 0x0024, 0x2e6: 0x0034, 0x2e7: 0x0024, 0x2e8: 0x0024, 0x2e9: 0x0034, + 0x2ea: 0x0024, 0x2eb: 0x0024, 0x2ec: 0x0024, 0x2ed: 0x0034, 0x2ee: 0x0034, 0x2ef: 0x0034, + 0x2f0: 0x0034, 0x2f1: 0x0034, 0x2f2: 0x0034, 0x2f3: 0x0024, 0x2f4: 0x0024, 0x2f5: 0x0024, + 0x2f6: 0x0034, 0x2f7: 0x0024, 0x2f8: 0x0024, 0x2f9: 0x0034, 0x2fa: 0x0034, 0x2fb: 0x0024, + 0x2fc: 0x0024, 0x2fd: 0x0024, 0x2fe: 0x0024, 0x2ff: 0x0024, + // Block 0xc, offset 0x300 + 0x300: 0x7053, 0x301: 0x7053, 0x302: 0x7053, 0x303: 0x7053, 0x304: 0x7053, 0x305: 0x7053, + 0x307: 0x7053, + 0x30d: 0x7053, 0x310: 0x1aea, 0x311: 0x1b6a, + 0x312: 0x1bea, 0x313: 0x1c6a, 0x314: 0x1cea, 0x315: 0x1d6a, 0x316: 0x1dea, 0x317: 0x1e6a, + 0x318: 0x1eea, 0x319: 0x1f6a, 0x31a: 0x1fea, 0x31b: 0x206a, 0x31c: 0x20ea, 0x31d: 0x216a, + 0x31e: 0x21ea, 0x31f: 0x226a, 0x320: 0x22ea, 0x321: 0x236a, 0x322: 0x23ea, 0x323: 0x246a, + 0x324: 0x24ea, 0x325: 0x256a, 0x326: 0x25ea, 0x327: 0x266a, 0x328: 0x26ea, 0x329: 0x276a, + 0x32a: 0x27ea, 0x32b: 0x286a, 0x32c: 0x28ea, 0x32d: 0x296a, 0x32e: 0x29ea, 0x32f: 0x2a6a, + 0x330: 0x2aea, 0x331: 0x2b6a, 0x332: 0x2bea, 0x333: 0x2c6a, 0x334: 0x2cea, 0x335: 0x2d6a, + 0x336: 0x2dea, 0x337: 0x2e6a, 0x338: 0x2eea, 0x339: 0x2f6a, 0x33a: 0x2fea, + 0x33c: 0x0015, 0x33d: 0x306a, 0x33e: 0x30ea, 0x33f: 0x316a, + // Block 0xd, offset 0x340 + 0x340: 0x0812, 0x341: 0x0812, 0x342: 0x0812, 0x343: 0x0812, 0x344: 0x0812, 0x345: 0x0812, + 0x348: 0x0813, 0x349: 0x0813, 0x34a: 0x0813, 0x34b: 0x0813, + 0x34c: 0x0813, 0x34d: 0x0813, 0x350: 0x3b1a, 0x351: 0x0812, + 0x352: 0x3bfa, 0x353: 0x0812, 0x354: 0x3d3a, 0x355: 0x0812, 0x356: 0x3e7a, 0x357: 0x0812, + 0x359: 0x0813, 0x35b: 0x0813, 0x35d: 0x0813, + 0x35f: 0x0813, 0x360: 0x0812, 0x361: 0x0812, 0x362: 0x0812, 0x363: 0x0812, + 0x364: 0x0812, 0x365: 0x0812, 0x366: 0x0812, 0x367: 0x0812, 0x368: 0x0813, 0x369: 0x0813, + 0x36a: 0x0813, 0x36b: 0x0813, 0x36c: 0x0813, 0x36d: 0x0813, 0x36e: 0x0813, 0x36f: 0x0813, + 0x370: 0x9252, 0x371: 0x9252, 0x372: 0x9552, 0x373: 0x9552, 0x374: 0x9852, 0x375: 0x9852, + 0x376: 0x9b52, 0x377: 0x9b52, 0x378: 0x9e52, 0x379: 0x9e52, 0x37a: 0xa152, 0x37b: 0xa152, + 0x37c: 0x4d52, 0x37d: 0x4d52, + // Block 0xe, offset 0x380 + 0x380: 0x3fba, 0x381: 0x40aa, 0x382: 0x419a, 0x383: 0x428a, 0x384: 0x437a, 0x385: 0x446a, + 0x386: 0x455a, 0x387: 0x464a, 0x388: 0x4739, 0x389: 0x4829, 0x38a: 0x4919, 0x38b: 0x4a09, + 0x38c: 0x4af9, 0x38d: 0x4be9, 0x38e: 0x4cd9, 0x38f: 0x4dc9, 0x390: 0x4eba, 0x391: 0x4faa, + 0x392: 0x509a, 0x393: 0x518a, 0x394: 0x527a, 0x395: 0x536a, 0x396: 0x545a, 0x397: 0x554a, + 0x398: 0x5639, 0x399: 0x5729, 0x39a: 0x5819, 0x39b: 0x5909, 0x39c: 0x59f9, 0x39d: 0x5ae9, + 0x39e: 0x5bd9, 0x39f: 0x5cc9, 0x3a0: 0x5dba, 0x3a1: 0x5eaa, 0x3a2: 0x5f9a, 0x3a3: 0x608a, + 0x3a4: 0x617a, 0x3a5: 0x626a, 0x3a6: 0x635a, 0x3a7: 0x644a, 0x3a8: 0x6539, 0x3a9: 0x6629, + 0x3aa: 0x6719, 0x3ab: 0x6809, 0x3ac: 0x68f9, 0x3ad: 0x69e9, 0x3ae: 0x6ad9, 0x3af: 0x6bc9, + 0x3b0: 0x0812, 0x3b1: 0x0812, 0x3b2: 0x6cba, 0x3b3: 0x6dca, 0x3b4: 0x6e9a, + 0x3b6: 0x6f7a, 0x3b7: 0x705a, 0x3b8: 0x0813, 0x3b9: 0x0813, 0x3ba: 0x9253, 0x3bb: 0x9253, + 0x3bc: 0x7199, 0x3bd: 0x0004, 0x3be: 0x726a, 0x3bf: 0x0004, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0004, 0x3c1: 0x0004, 0x3c2: 0x72ea, 
0x3c3: 0x73fa, 0x3c4: 0x74ca, + 0x3c6: 0x75aa, 0x3c7: 0x768a, 0x3c8: 0x9553, 0x3c9: 0x9553, 0x3ca: 0x9853, 0x3cb: 0x9853, + 0x3cc: 0x77c9, 0x3cd: 0x0004, 0x3ce: 0x0004, 0x3cf: 0x0004, 0x3d0: 0x0812, 0x3d1: 0x0812, + 0x3d2: 0x789a, 0x3d3: 0x79da, 0x3d6: 0x7b1a, 0x3d7: 0x7bfa, + 0x3d8: 0x0813, 0x3d9: 0x0813, 0x3da: 0x9b53, 0x3db: 0x9b53, 0x3dd: 0x0004, + 0x3de: 0x0004, 0x3df: 0x0004, 0x3e0: 0x0812, 0x3e1: 0x0812, 0x3e2: 0x7d3a, 0x3e3: 0x7e7a, + 0x3e4: 0x7fba, 0x3e5: 0x0912, 0x3e6: 0x809a, 0x3e7: 0x817a, 0x3e8: 0x0813, 0x3e9: 0x0813, + 0x3ea: 0xa153, 0x3eb: 0xa153, 0x3ec: 0x0913, 0x3ed: 0x0004, 0x3ee: 0x0004, 0x3ef: 0x0004, + 0x3f2: 0x82ba, 0x3f3: 0x83ca, 0x3f4: 0x849a, + 0x3f6: 0x857a, 0x3f7: 0x865a, 0x3f8: 0x9e53, 0x3f9: 0x9e53, 0x3fa: 0x4d53, 0x3fb: 0x4d53, + 0x3fc: 0x8799, 0x3fd: 0x0004, 0x3fe: 0x0004, + // Block 0x10, offset 0x400 + 0x402: 0x0013, + 0x407: 0x0013, 0x40a: 0x0012, 0x40b: 0x0013, + 0x40c: 0x0013, 0x40d: 0x0013, 0x40e: 0x0012, 0x40f: 0x0012, 0x410: 0x0013, 0x411: 0x0013, + 0x412: 0x0013, 0x413: 0x0012, 0x415: 0x0013, + 0x419: 0x0013, 0x41a: 0x0013, 0x41b: 0x0013, 0x41c: 0x0013, 0x41d: 0x0013, + 0x424: 0x0013, 0x426: 0x886b, 0x428: 0x0013, + 0x42a: 0x88cb, 0x42b: 0x890b, 0x42c: 0x0013, 0x42d: 0x0013, 0x42f: 0x0012, + 0x430: 0x0013, 0x431: 0x0013, 0x432: 0xa453, 0x433: 0x0013, 0x434: 0x0012, 0x435: 0x0010, + 0x436: 0x0010, 0x437: 0x0010, 0x438: 0x0010, 0x439: 0x0012, + 0x43c: 0x0012, 0x43d: 0x0012, 0x43e: 0x0013, 0x43f: 0x0013, + // Block 0x11, offset 0x440 + 0x440: 0x1a13, 0x441: 0x1a13, 0x442: 0x1e13, 0x443: 0x1e13, 0x444: 0x1a13, 0x445: 0x1a13, + 0x446: 0x2613, 0x447: 0x2613, 0x448: 0x2a13, 0x449: 0x2a13, 0x44a: 0x2e13, 0x44b: 0x2e13, + 0x44c: 0x2a13, 0x44d: 0x2a13, 0x44e: 0x2613, 0x44f: 0x2613, 0x450: 0xa752, 0x451: 0xa752, + 0x452: 0xaa52, 0x453: 0xaa52, 0x454: 0xad52, 0x455: 0xad52, 0x456: 0xaa52, 0x457: 0xaa52, + 0x458: 0xa752, 0x459: 0xa752, 0x45a: 0x1a12, 0x45b: 0x1a12, 0x45c: 0x1e12, 0x45d: 0x1e12, + 0x45e: 0x1a12, 0x45f: 0x1a12, 0x460: 0x2612, 0x461: 0x2612, 0x462: 0x2a12, 0x463: 0x2a12, + 0x464: 0x2e12, 0x465: 0x2e12, 0x466: 0x2a12, 0x467: 0x2a12, 0x468: 0x2612, 0x469: 0x2612, + // Block 0x12, offset 0x480 + 0x480: 0x6552, 0x481: 0x6552, 0x482: 0x6552, 0x483: 0x6552, 0x484: 0x6552, 0x485: 0x6552, + 0x486: 0x6552, 0x487: 0x6552, 0x488: 0x6552, 0x489: 0x6552, 0x48a: 0x6552, 0x48b: 0x6552, + 0x48c: 0x6552, 0x48d: 0x6552, 0x48e: 0x6552, 0x48f: 0x6552, 0x490: 0xb052, 0x491: 0xb052, + 0x492: 0xb052, 0x493: 0xb052, 0x494: 0xb052, 0x495: 0xb052, 0x496: 0xb052, 0x497: 0xb052, + 0x498: 0xb052, 0x499: 0xb052, 0x49a: 0xb052, 0x49b: 0xb052, 0x49c: 0xb052, 0x49d: 0xb052, + 0x49e: 0xb052, 0x49f: 0xb052, 0x4a0: 0x0113, 0x4a1: 0x0112, 0x4a2: 0x896b, 0x4a3: 0x8b53, + 0x4a4: 0x89cb, 0x4a5: 0x8a2a, 0x4a6: 0x8a8a, 0x4a7: 0x0f13, 0x4a8: 0x0f12, 0x4a9: 0x0313, + 0x4aa: 0x0312, 0x4ab: 0x0713, 0x4ac: 0x0712, 0x4ad: 0x8aeb, 0x4ae: 0x8b4b, 0x4af: 0x8bab, + 0x4b0: 0x8c0b, 0x4b1: 0x0012, 0x4b2: 0x0113, 0x4b3: 0x0112, 0x4b4: 0x0012, 0x4b5: 0x0313, + 0x4b6: 0x0312, 0x4b7: 0x0012, 0x4b8: 0x0012, 0x4b9: 0x0012, 0x4ba: 0x0012, 0x4bb: 0x0012, + 0x4bc: 0x0015, 0x4bd: 0x0015, 0x4be: 0x8c6b, 0x4bf: 0x8ccb, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0113, 0x4c1: 0x0112, 0x4c2: 0x0113, 0x4c3: 0x0112, 0x4c4: 0x0113, 0x4c5: 0x0112, + 0x4c6: 0x0113, 0x4c7: 0x0112, 0x4c8: 0x0014, 0x4c9: 0x0014, 0x4ca: 0x0014, 0x4cb: 0x0713, + 0x4cc: 0x0712, 0x4cd: 0x8d2b, 0x4ce: 0x0012, 0x4cf: 0x0010, 0x4d0: 0x0113, 0x4d1: 0x0112, + 0x4d2: 0x0113, 0x4d3: 0x0112, 0x4d4: 0x6552, 0x4d5: 0x0012, 0x4d6: 0x0113, 0x4d7: 0x0112, + 
0x4d8: 0x0113, 0x4d9: 0x0112, 0x4da: 0x0113, 0x4db: 0x0112, 0x4dc: 0x0113, 0x4dd: 0x0112, + 0x4de: 0x0113, 0x4df: 0x0112, 0x4e0: 0x0113, 0x4e1: 0x0112, 0x4e2: 0x0113, 0x4e3: 0x0112, + 0x4e4: 0x0113, 0x4e5: 0x0112, 0x4e6: 0x0113, 0x4e7: 0x0112, 0x4e8: 0x0113, 0x4e9: 0x0112, + 0x4ea: 0x8d8b, 0x4eb: 0x8deb, 0x4ec: 0x8e4b, 0x4ed: 0x8eab, 0x4ee: 0x8f0b, 0x4ef: 0x0012, + 0x4f0: 0x8f6b, 0x4f1: 0x8fcb, 0x4f2: 0x902b, 0x4f3: 0xb353, 0x4f4: 0x0113, 0x4f5: 0x0112, + 0x4f6: 0x0113, 0x4f7: 0x0112, 0x4f8: 0x0113, 0x4f9: 0x0112, 0x4fa: 0x0113, 0x4fb: 0x0112, + 0x4fc: 0x0113, 0x4fd: 0x0112, 0x4fe: 0x0113, 0x4ff: 0x0112, + // Block 0x14, offset 0x500 + 0x500: 0x90ea, 0x501: 0x916a, 0x502: 0x91ea, 0x503: 0x926a, 0x504: 0x931a, 0x505: 0x93ca, + 0x506: 0x944a, + 0x513: 0x94ca, 0x514: 0x95aa, 0x515: 0x968a, 0x516: 0x976a, 0x517: 0x984a, + 0x51d: 0x0010, + 0x51e: 0x0034, 0x51f: 0x0010, 0x520: 0x0010, 0x521: 0x0010, 0x522: 0x0010, 0x523: 0x0010, + 0x524: 0x0010, 0x525: 0x0010, 0x526: 0x0010, 0x527: 0x0010, 0x528: 0x0010, + 0x52a: 0x0010, 0x52b: 0x0010, 0x52c: 0x0010, 0x52d: 0x0010, 0x52e: 0x0010, 0x52f: 0x0010, + 0x530: 0x0010, 0x531: 0x0010, 0x532: 0x0010, 0x533: 0x0010, 0x534: 0x0010, 0x535: 0x0010, + 0x536: 0x0010, 0x538: 0x0010, 0x539: 0x0010, 0x53a: 0x0010, 0x53b: 0x0010, + 0x53c: 0x0010, 0x53e: 0x0010, + // Block 0x15, offset 0x540 + 0x540: 0x2713, 0x541: 0x2913, 0x542: 0x2b13, 0x543: 0x2913, 0x544: 0x2f13, 0x545: 0x2913, + 0x546: 0x2b13, 0x547: 0x2913, 0x548: 0x2713, 0x549: 0x3913, 0x54a: 0x3b13, + 0x54c: 0x3f13, 0x54d: 0x3913, 0x54e: 0x3b13, 0x54f: 0x3913, 0x550: 0x2713, 0x551: 0x2913, + 0x552: 0x2b13, 0x554: 0x2f13, 0x555: 0x2913, 0x557: 0xbc52, + 0x558: 0xbf52, 0x559: 0xc252, 0x55a: 0xbf52, 0x55b: 0xc552, 0x55c: 0xbf52, 0x55d: 0xc252, + 0x55e: 0xbf52, 0x55f: 0xbc52, 0x560: 0xc852, 0x561: 0xcb52, 0x563: 0xce52, + 0x564: 0xc852, 0x565: 0xcb52, 0x566: 0xc852, 0x567: 0x2712, 0x568: 0x2912, 0x569: 0x2b12, + 0x56a: 0x2912, 0x56b: 0x2f12, 0x56c: 0x2912, 0x56d: 0x2b12, 0x56e: 0x2912, 0x56f: 0x2712, + 0x570: 0x3912, 0x571: 0x3b12, 0x573: 0x3f12, 0x574: 0x3912, 0x575: 0x3b12, + 0x576: 0x3912, 0x577: 0x2712, 0x578: 0x2912, 0x579: 0x2b12, 0x57b: 0x2f12, + 0x57c: 0x2912, + // Block 0x16, offset 0x580 + 0x580: 0x2213, 0x581: 0x2213, 0x582: 0x2613, 0x583: 0x2613, 0x584: 0x2213, 0x585: 0x2213, + 0x586: 0x2e13, 0x587: 0x2e13, 0x588: 0x2213, 0x589: 0x2213, 0x58a: 0x2613, 0x58b: 0x2613, + 0x58c: 0x2213, 0x58d: 0x2213, 0x58e: 0x3e13, 0x58f: 0x3e13, 0x590: 0x2213, 0x591: 0x2213, + 0x592: 0x2613, 0x593: 0x2613, 0x594: 0x2213, 0x595: 0x2213, 0x596: 0x2e13, 0x597: 0x2e13, + 0x598: 0x2213, 0x599: 0x2213, 0x59a: 0x2613, 0x59b: 0x2613, 0x59c: 0x2213, 0x59d: 0x2213, + 0x59e: 0xd153, 0x59f: 0xd153, 0x5a0: 0xd453, 0x5a1: 0xd453, 0x5a2: 0x2212, 0x5a3: 0x2212, + 0x5a4: 0x2612, 0x5a5: 0x2612, 0x5a6: 0x2212, 0x5a7: 0x2212, 0x5a8: 0x2e12, 0x5a9: 0x2e12, + 0x5aa: 0x2212, 0x5ab: 0x2212, 0x5ac: 0x2612, 0x5ad: 0x2612, 0x5ae: 0x2212, 0x5af: 0x2212, + 0x5b0: 0x3e12, 0x5b1: 0x3e12, 0x5b2: 0x2212, 0x5b3: 0x2212, 0x5b4: 0x2612, 0x5b5: 0x2612, + 0x5b6: 0x2212, 0x5b7: 0x2212, 0x5b8: 0x2e12, 0x5b9: 0x2e12, 0x5ba: 0x2212, 0x5bb: 0x2212, + 0x5bc: 0x2612, 0x5bd: 0x2612, 0x5be: 0x2212, 0x5bf: 0x2212, + // Block 0x17, offset 0x5c0 + 0x5c2: 0x0010, + 0x5c7: 0x0010, 0x5c9: 0x0010, 0x5cb: 0x0010, + 0x5cd: 0x0010, 0x5ce: 0x0010, 0x5cf: 0x0010, 0x5d1: 0x0010, + 0x5d2: 0x0010, 0x5d4: 0x0010, 0x5d7: 0x0010, + 0x5d9: 0x0010, 0x5db: 0x0010, 0x5dd: 0x0010, + 0x5df: 0x0010, 0x5e1: 0x0010, 0x5e2: 0x0010, + 0x5e4: 0x0010, 0x5e7: 0x0010, 0x5e8: 0x0010, 0x5e9: 0x0010, 
+ 0x5ea: 0x0010, 0x5ec: 0x0010, 0x5ed: 0x0010, 0x5ee: 0x0010, 0x5ef: 0x0010, + 0x5f0: 0x0010, 0x5f1: 0x0010, 0x5f2: 0x0010, 0x5f4: 0x0010, 0x5f5: 0x0010, + 0x5f6: 0x0010, 0x5f7: 0x0010, 0x5f9: 0x0010, 0x5fa: 0x0010, 0x5fb: 0x0010, + 0x5fc: 0x0010, 0x5fe: 0x0010, +} + +// caseIndex: 27 blocks, 1728 entries, 3456 bytes +// Block 0 is the zero block. +var caseIndex = [1728]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x16, 0xc3: 0x17, 0xc4: 0x18, 0xc5: 0x19, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x1a, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x1b, 0xcc: 0x1c, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x1d, 0xd1: 0x1e, 0xd2: 0x1f, 0xd3: 0x20, 0xd4: 0x21, 0xd5: 0x22, 0xd6: 0x08, 0xd7: 0x23, + 0xd8: 0x24, 0xd9: 0x25, 0xda: 0x26, 0xdb: 0x27, 0xdc: 0x28, 0xdd: 0x29, 0xde: 0x2a, 0xdf: 0x2b, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x16, 0xf3: 0x18, + // Block 0x4, offset 0x100 + 0x120: 0x2c, 0x121: 0x2d, 0x122: 0x2e, 0x123: 0x09, 0x124: 0x2f, 0x125: 0x30, 0x126: 0x31, 0x127: 0x32, + 0x128: 0x33, 0x129: 0x34, 0x12a: 0x35, 0x12b: 0x36, 0x12c: 0x37, 0x12d: 0x38, 0x12e: 0x39, 0x12f: 0x3a, + 0x130: 0x3b, 0x131: 0x3c, 0x132: 0x3d, 0x133: 0x3e, 0x134: 0x3f, 0x135: 0x40, 0x136: 0x41, 0x137: 0x42, + 0x138: 0x43, 0x139: 0x44, 0x13a: 0x45, 0x13b: 0x46, 0x13c: 0x47, 0x13d: 0x48, 0x13e: 0x49, 0x13f: 0x4a, + // Block 0x5, offset 0x140 + 0x140: 0x4b, 0x141: 0x4c, 0x142: 0x4d, 0x143: 0x0a, 0x144: 0x26, 0x145: 0x26, 0x146: 0x26, 0x147: 0x26, + 0x148: 0x26, 0x149: 0x4e, 0x14a: 0x4f, 0x14b: 0x50, 0x14c: 0x51, 0x14d: 0x52, 0x14e: 0x53, 0x14f: 0x54, + 0x150: 0x55, 0x151: 0x26, 0x152: 0x26, 0x153: 0x26, 0x154: 0x26, 0x155: 0x26, 0x156: 0x26, 0x157: 0x26, + 0x158: 0x26, 0x159: 0x56, 0x15a: 0x57, 0x15b: 0x58, 0x15c: 0x59, 0x15d: 0x5a, 0x15e: 0x5b, 0x15f: 0x5c, + 0x160: 0x5d, 0x161: 0x5e, 0x162: 0x5f, 0x163: 0x60, 0x164: 0x61, 0x165: 0x62, 0x167: 0x63, + 0x168: 0x64, 0x169: 0x65, 0x16a: 0x66, 0x16b: 0x67, 0x16c: 0x68, 0x16d: 0x69, 0x16e: 0x6a, 0x16f: 0x6b, + 0x170: 0x6c, 0x171: 0x6d, 0x172: 0x6e, 0x173: 0x6f, 0x174: 0x70, 0x175: 0x71, 0x176: 0x72, 0x177: 0x73, + 0x178: 0x74, 0x179: 0x74, 0x17a: 0x75, 0x17b: 0x74, 0x17c: 0x76, 0x17d: 0x0b, 0x17e: 0x0c, 0x17f: 0x0d, + // Block 0x6, offset 0x180 + 0x180: 0x77, 0x181: 0x78, 0x182: 0x79, 0x183: 0x7a, 0x184: 0x0e, 0x185: 0x7b, 0x186: 0x7c, + 0x192: 0x7d, 0x193: 0x0f, + 0x1b0: 0x7e, 0x1b1: 0x10, 0x1b2: 0x74, 0x1b3: 0x7f, 0x1b4: 0x80, 0x1b5: 0x81, 0x1b6: 0x82, 0x1b7: 0x83, + 0x1b8: 0x84, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x85, 0x1c2: 0x86, 0x1c3: 0x87, 0x1c4: 0x88, 0x1c5: 0x26, 0x1c6: 0x89, + // Block 0x8, offset 0x200 + 0x200: 0x8a, 0x201: 0x26, 0x202: 0x26, 0x203: 0x26, 0x204: 0x26, 0x205: 0x26, 0x206: 0x26, 0x207: 0x26, + 0x208: 0x26, 0x209: 0x26, 0x20a: 0x26, 0x20b: 0x26, 0x20c: 0x26, 0x20d: 0x26, 0x20e: 0x26, 0x20f: 0x26, + 0x210: 0x26, 0x211: 0x26, 0x212: 0x8b, 0x213: 0x8c, 0x214: 0x26, 0x215: 0x26, 0x216: 0x26, 0x217: 0x26, + 0x218: 0x8d, 0x219: 0x8e, 0x21a: 0x8f, 0x21b: 0x90, 0x21c: 0x91, 0x21d: 0x92, 0x21e: 0x11, 0x21f: 0x93, + 0x220: 0x94, 0x221: 0x95, 0x222: 0x26, 0x223: 0x96, 0x224: 0x97, 0x225: 0x98, 0x226: 0x99, 0x227: 0x9a, + 0x228: 0x9b, 0x229: 0x9c, 0x22a: 0x9d, 0x22b: 0x9e, 0x22c: 0x9f, 0x22d: 0xa0, 0x22e: 0xa1, 0x22f: 0xa2, + 0x230: 0x26, 0x231: 0x26, 0x232: 0x26, 0x233: 0x26, 0x234: 0x26, 0x235: 0x26, 0x236: 0x26, 0x237: 0x26, + 0x238: 0x26, 0x239: 0x26, 0x23a: 0x26, 0x23b: 0x26, 0x23c: 0x26, 0x23d: 0x26, 0x23e: 
0x26, 0x23f: 0x26, + // Block 0x9, offset 0x240 + 0x240: 0x26, 0x241: 0x26, 0x242: 0x26, 0x243: 0x26, 0x244: 0x26, 0x245: 0x26, 0x246: 0x26, 0x247: 0x26, + 0x248: 0x26, 0x249: 0x26, 0x24a: 0x26, 0x24b: 0x26, 0x24c: 0x26, 0x24d: 0x26, 0x24e: 0x26, 0x24f: 0x26, + 0x250: 0x26, 0x251: 0x26, 0x252: 0x26, 0x253: 0x26, 0x254: 0x26, 0x255: 0x26, 0x256: 0x26, 0x257: 0x26, + 0x258: 0x26, 0x259: 0x26, 0x25a: 0x26, 0x25b: 0x26, 0x25c: 0x26, 0x25d: 0x26, 0x25e: 0x26, 0x25f: 0x26, + 0x260: 0x26, 0x261: 0x26, 0x262: 0x26, 0x263: 0x26, 0x264: 0x26, 0x265: 0x26, 0x266: 0x26, 0x267: 0x26, + 0x268: 0x26, 0x269: 0x26, 0x26a: 0x26, 0x26b: 0x26, 0x26c: 0x26, 0x26d: 0x26, 0x26e: 0x26, 0x26f: 0x26, + 0x270: 0x26, 0x271: 0x26, 0x272: 0x26, 0x273: 0x26, 0x274: 0x26, 0x275: 0x26, 0x276: 0x26, 0x277: 0x26, + 0x278: 0x26, 0x279: 0x26, 0x27a: 0x26, 0x27b: 0x26, 0x27c: 0x26, 0x27d: 0x26, 0x27e: 0x26, 0x27f: 0x26, + // Block 0xa, offset 0x280 + 0x280: 0x26, 0x281: 0x26, 0x282: 0x26, 0x283: 0x26, 0x284: 0x26, 0x285: 0x26, 0x286: 0x26, 0x287: 0x26, + 0x288: 0x26, 0x289: 0x26, 0x28a: 0x26, 0x28b: 0x26, 0x28c: 0x26, 0x28d: 0x26, 0x28e: 0x26, 0x28f: 0x26, + 0x290: 0x26, 0x291: 0x26, 0x292: 0x26, 0x293: 0x26, 0x294: 0x26, 0x295: 0x26, 0x296: 0x26, 0x297: 0x26, + 0x298: 0x26, 0x299: 0x26, 0x29a: 0x26, 0x29b: 0x26, 0x29c: 0x26, 0x29d: 0x26, 0x29e: 0xa3, 0x29f: 0xa4, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x12, 0x2ed: 0xa5, 0x2ee: 0xa6, 0x2ef: 0xa7, + 0x2f0: 0x26, 0x2f1: 0x26, 0x2f2: 0x26, 0x2f3: 0x26, 0x2f4: 0xa8, 0x2f5: 0xa9, 0x2f6: 0xaa, 0x2f7: 0xab, + 0x2f8: 0xac, 0x2f9: 0xad, 0x2fa: 0x26, 0x2fb: 0xae, 0x2fc: 0xaf, 0x2fd: 0xb0, 0x2fe: 0xb1, 0x2ff: 0xb2, + // Block 0xc, offset 0x300 + 0x300: 0xb3, 0x301: 0xb4, 0x302: 0x26, 0x303: 0xb5, 0x305: 0xb6, 0x307: 0xb7, + 0x30a: 0xb8, 0x30b: 0xb9, 0x30c: 0xba, 0x30d: 0xbb, 0x30e: 0xbc, 0x30f: 0xbd, + 0x310: 0xbe, 0x311: 0xbf, 0x312: 0xc0, 0x313: 0xc1, 0x314: 0xc2, 0x315: 0xc3, 0x316: 0x13, + 0x318: 0x26, 0x319: 0x26, 0x31a: 0x26, 0x31b: 0x26, 0x31c: 0xc4, 0x31d: 0xc5, 0x31e: 0xc6, + 0x320: 0xc7, 0x321: 0xc8, 0x322: 0xc9, 0x323: 0xca, 0x324: 0xcb, 0x326: 0xcc, + 0x328: 0xcd, 0x329: 0xce, 0x32a: 0xcf, 0x32b: 0xd0, 0x32c: 0x60, 0x32d: 0xd1, 0x32e: 0xd2, + 0x330: 0x26, 0x331: 0xd3, 0x332: 0xd4, 0x333: 0xd5, 0x334: 0xd6, + 0x33a: 0xd7, 0x33b: 0xd8, 0x33c: 0xd9, 0x33d: 0xda, 0x33e: 0xdb, 0x33f: 0xdc, + // Block 0xd, offset 0x340 + 0x340: 0xdd, 0x341: 0xde, 0x342: 0xdf, 0x343: 0xe0, 0x344: 0xe1, 0x345: 0xe2, 0x346: 0xe3, 0x347: 0xe4, + 0x348: 0xe5, 0x349: 0xe6, 0x34a: 0xe7, 0x34b: 0xe8, 0x34c: 0xe9, 0x34d: 0xea, + 0x350: 0xeb, 0x351: 0xec, 0x352: 0xed, 0x353: 0xee, 0x356: 0xef, 0x357: 0xf0, + 0x358: 0xf1, 0x359: 0xf2, 0x35a: 0xf3, 0x35b: 0xf4, 0x35c: 0xf5, + 0x360: 0xf6, 0x362: 0xf7, 0x363: 0xf8, 0x364: 0xf9, 0x365: 0xfa, 0x366: 0xfb, 0x367: 0xfc, + 0x368: 0xfd, 0x369: 0xfe, 0x36a: 0xff, 0x36b: 0x100, + 0x370: 0x101, 0x371: 0x102, 0x372: 0x103, 0x374: 0x104, 0x375: 0x105, 0x376: 0x106, + 0x37b: 0x107, 0x37c: 0x108, 0x37d: 0x109, 0x37e: 0x10a, + // Block 0xe, offset 0x380 + 0x380: 0x26, 0x381: 0x26, 0x382: 0x26, 0x383: 0x26, 0x384: 0x26, 0x385: 0x26, 0x386: 0x26, 0x387: 0x26, + 0x388: 0x26, 0x389: 0x26, 0x38a: 0x26, 0x38b: 0x26, 0x38c: 0x26, 0x38d: 0x26, 0x38e: 0x10b, + 0x390: 0x26, 0x391: 0x10c, 0x392: 0x26, 0x393: 0x26, 0x394: 0x26, 0x395: 0x10d, + 0x3be: 0xa9, 0x3bf: 0x10e, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x26, 0x3c1: 0x26, 0x3c2: 0x26, 0x3c3: 0x26, 0x3c4: 0x26, 0x3c5: 0x26, 0x3c6: 0x26, 0x3c7: 0x26, + 0x3c8: 0x26, 0x3c9: 0x26, 0x3ca: 0x26, 0x3cb: 0x26, 0x3cc: 0x26, 0x3cd: 0x26, 0x3ce: 
0x26, 0x3cf: 0x26, + 0x3d0: 0x10f, 0x3d1: 0x110, + // Block 0x10, offset 0x400 + 0x410: 0x26, 0x411: 0x26, 0x412: 0x26, 0x413: 0x26, 0x414: 0x26, 0x415: 0x26, 0x416: 0x26, 0x417: 0x26, + 0x418: 0x26, 0x419: 0x111, + // Block 0x11, offset 0x440 + 0x460: 0x26, 0x461: 0x26, 0x462: 0x26, 0x463: 0x26, 0x464: 0x26, 0x465: 0x26, 0x466: 0x26, 0x467: 0x26, + 0x468: 0x100, 0x469: 0x112, 0x46a: 0x113, 0x46b: 0x114, 0x46c: 0x115, 0x46d: 0x116, 0x46e: 0x117, + 0x479: 0x118, 0x47c: 0x26, 0x47d: 0x119, 0x47e: 0x11a, 0x47f: 0x11b, + // Block 0x12, offset 0x480 + 0x4bf: 0x11c, + // Block 0x13, offset 0x4c0 + 0x4f0: 0x26, 0x4f1: 0x11d, 0x4f2: 0x11e, + // Block 0x14, offset 0x500 + 0x53c: 0x11f, 0x53d: 0x120, + // Block 0x15, offset 0x540 + 0x545: 0x121, 0x546: 0x122, + 0x549: 0x123, + 0x550: 0x124, 0x551: 0x125, 0x552: 0x126, 0x553: 0x127, 0x554: 0x128, 0x555: 0x129, 0x556: 0x12a, 0x557: 0x12b, + 0x558: 0x12c, 0x559: 0x12d, 0x55a: 0x12e, 0x55b: 0x12f, 0x55c: 0x130, 0x55d: 0x131, 0x55e: 0x132, 0x55f: 0x133, + 0x568: 0x134, 0x569: 0x135, 0x56a: 0x136, + 0x57c: 0x137, + // Block 0x16, offset 0x580 + 0x580: 0x138, 0x581: 0x139, 0x582: 0x13a, 0x584: 0x13b, 0x585: 0x13c, + 0x58a: 0x13d, 0x58b: 0x13e, + 0x593: 0x13f, + 0x59f: 0x140, + 0x5a0: 0x26, 0x5a1: 0x26, 0x5a2: 0x26, 0x5a3: 0x141, 0x5a4: 0x14, 0x5a5: 0x142, + 0x5b8: 0x143, 0x5b9: 0x15, 0x5ba: 0x144, + // Block 0x17, offset 0x5c0 + 0x5c4: 0x145, 0x5c5: 0x146, 0x5c6: 0x147, + 0x5cf: 0x148, + 0x5ef: 0x149, + // Block 0x18, offset 0x600 + 0x610: 0x0a, 0x611: 0x0b, 0x612: 0x0c, 0x613: 0x0d, 0x614: 0x0e, 0x616: 0x0f, + 0x61a: 0x10, 0x61b: 0x11, 0x61c: 0x12, 0x61d: 0x13, 0x61e: 0x14, 0x61f: 0x15, + // Block 0x19, offset 0x640 + 0x640: 0x14a, 0x641: 0x14b, 0x644: 0x14b, 0x645: 0x14b, 0x646: 0x14b, 0x647: 0x14c, + // Block 0x1a, offset 0x680 + 0x6a0: 0x17, +} + +// sparseOffsets: 312 entries, 624 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x34, 0x37, 0x3b, 0x3e, 0x42, 0x4c, 0x4e, 0x57, 0x5e, 0x63, 0x71, 0x72, 0x80, 0x8f, 0x99, 0x9c, 0xa3, 0xab, 0xaf, 0xb7, 0xbd, 0xcb, 0xd6, 0xe3, 0xee, 0xfa, 0x104, 0x110, 0x11b, 0x127, 0x133, 0x13b, 0x145, 0x150, 0x15b, 0x167, 0x16d, 0x178, 0x17e, 0x186, 0x189, 0x18e, 0x192, 0x196, 0x19d, 0x1a6, 0x1ae, 0x1af, 0x1b8, 0x1bf, 0x1c7, 0x1cd, 0x1d2, 0x1d6, 0x1d9, 0x1db, 0x1de, 0x1e3, 0x1e4, 0x1e6, 0x1e8, 0x1ea, 0x1f1, 0x1f6, 0x1fa, 0x203, 0x206, 0x209, 0x20f, 0x210, 0x21b, 0x21c, 0x21d, 0x222, 0x22f, 0x238, 0x23e, 0x246, 0x24f, 0x258, 0x261, 0x266, 0x269, 0x274, 0x282, 0x284, 0x28b, 0x28f, 0x29b, 0x29c, 0x2a7, 0x2af, 0x2b7, 0x2bd, 0x2be, 0x2cc, 0x2d1, 0x2d4, 0x2d9, 0x2dd, 0x2e3, 0x2e8, 0x2eb, 0x2f0, 0x2f5, 0x2f6, 0x2fc, 0x2fe, 0x2ff, 0x301, 0x303, 0x306, 0x307, 0x309, 0x30c, 0x312, 0x316, 0x318, 0x31d, 0x324, 0x334, 0x33e, 0x33f, 0x348, 0x34c, 0x351, 0x359, 0x35f, 0x365, 0x36f, 0x374, 0x37d, 0x383, 0x38c, 0x390, 0x398, 0x39a, 0x39c, 0x39f, 0x3a1, 0x3a3, 0x3a4, 0x3a5, 0x3a7, 0x3a9, 0x3af, 0x3b4, 0x3b6, 0x3bd, 0x3c0, 0x3c2, 0x3c8, 0x3cd, 0x3cf, 0x3d0, 0x3d1, 0x3d2, 0x3d4, 0x3d6, 0x3d8, 0x3db, 0x3dd, 0x3e0, 0x3e8, 0x3eb, 0x3ef, 0x3f7, 0x3f9, 0x409, 0x40a, 0x40c, 0x411, 0x417, 0x419, 0x41a, 0x41c, 0x41e, 0x420, 0x42d, 0x42e, 0x42f, 0x433, 0x435, 0x436, 0x437, 0x438, 0x439, 0x43c, 0x43f, 0x440, 0x443, 0x44a, 0x450, 0x452, 0x456, 0x45e, 0x464, 0x468, 0x46f, 0x473, 0x477, 0x480, 0x48a, 0x48c, 0x492, 0x498, 0x4a2, 0x4ac, 0x4ae, 0x4b7, 0x4bd, 0x4c3, 0x4c9, 0x4cc, 0x4d2, 0x4d5, 0x4de, 0x4df, 0x4e6, 0x4ea, 0x4eb, 0x4ee, 0x4f8, 0x4fb, 0x4fd, 0x504, 0x50c, 0x512, 0x519, 0x51a, 0x520, 0x523, 0x52b, 0x532, 0x53c, 0x544, 0x547, 
0x54c, 0x550, 0x551, 0x552, 0x553, 0x554, 0x555, 0x557, 0x55a, 0x55b, 0x55e, 0x55f, 0x562, 0x564, 0x568, 0x569, 0x56b, 0x56e, 0x570, 0x573, 0x576, 0x578, 0x57d, 0x57f, 0x580, 0x585, 0x589, 0x58a, 0x58d, 0x591, 0x59c, 0x5a0, 0x5a8, 0x5ad, 0x5b1, 0x5b4, 0x5b8, 0x5bb, 0x5be, 0x5c3, 0x5c7, 0x5cb, 0x5cf, 0x5d3, 0x5d5, 0x5d7, 0x5da, 0x5de, 0x5e4, 0x5e5, 0x5e6, 0x5e9, 0x5eb, 0x5ed, 0x5f0, 0x5f5, 0x5f9, 0x5fb, 0x601, 0x60a, 0x60f, 0x610, 0x613, 0x614, 0x615, 0x616, 0x618, 0x619, 0x61a} + +// sparseValues: 1562 entries, 6248 bytes +var sparseValues = [1562]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9d}, + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x37 + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x3b + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x3e + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x42 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 
0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x4c + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x4e + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0054, lo: 0x9f, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa0}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x57 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xaf, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xe, offset 0x5e + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xf, offset 0x63 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x10, offset 0x71 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x11, offset 0x72 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x12, offset 0x80 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x8f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 
0x14, offset 0x99 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x15, offset 0x9c + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0x16, offset 0xa3 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x17, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0xa0, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x18, offset 0xaf + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0004, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0024, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + {value: 0x0024, lo: 0x9c, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x19, offset 0xb7 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1a, offset 0xbd + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1b, offset 0xcb + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1c, offset 0xd6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xe3 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, 
+ {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x1e, offset 0xee + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x1f, offset 0xfa + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x20, offset 0x104 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xbf}, + // Block 0x21, offset 0x110 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x22, offset 0x11b + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x23, offset 0x127 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x24, offset 0x133 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + 
{value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x26, offset 0x145 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x27, offset 0x150 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x28, offset 0x15b + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9d, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb3}, + // Block 0x29, offset 0x167 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2a, offset 0x16d + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2b, offset 0x178 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2c, offset 0x17e + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2d, offset 0x186 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x2e, offset 0x189 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x2f, offset 0x18e + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x30, offset 0x192 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, 
hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x31, offset 0x196 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x32, offset 0x19d + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x33, offset 0x1a6 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x34, offset 0x1ae + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x35, offset 0x1af + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x36, offset 0x1b8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x37, offset 0x1bf + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x38, offset 0x1c7 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x39, offset 0x1cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3a, offset 0x1d2 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3b, offset 0x1d6 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3c, offset 0x1d9 + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x3d, offset 0x1db + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x3e, offset 0x1de + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x3f, offset 0x1e3 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 
0x40, offset 0x1e4 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x41, offset 0x1e6 + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x42, offset 0x1e8 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x43, offset 0x1ea + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0030, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x9f, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0030, lo: 0xb4, hi: 0xb4}, + // Block 0x44, offset 0x1f1 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x45, offset 0x1f6 + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x46, offset 0x1fa + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x47, offset 0x203 + {value: 0x0014, lo: 0x8b, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x48, offset 0x206 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb8}, + // Block 0x49, offset 0x209 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4a, offset 0x20f + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4b, offset 0x210 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4c, offset 0x21b + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x4d, offset 0x21c + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x4e, offset 0x21d + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x4f, offset 0x222 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x50, offset 0x22f + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 
0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x51, offset 0x238 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0024, lo: 0x81, hi: 0x82}, + {value: 0x0034, lo: 0x83, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8e}, + // Block 0x52, offset 0x23e + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x246 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x54, offset 0x24f + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x55, offset 0x258 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x56, offset 0x261 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x57, offset 0x266 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x58, offset 0x269 + {value: 0x31ea, lo: 0x80, hi: 0x80}, + {value: 0x326a, lo: 0x81, hi: 0x81}, + {value: 0x32ea, lo: 0x82, hi: 0x82}, + {value: 0x336a, lo: 0x83, hi: 0x83}, + {value: 0x33ea, lo: 0x84, hi: 0x84}, + {value: 0x346a, lo: 0x85, hi: 0x85}, + {value: 0x34ea, lo: 0x86, hi: 0x86}, + {value: 0x356a, lo: 0x87, hi: 0x87}, + {value: 0x35ea, lo: 0x88, hi: 0x88}, + {value: 0x8353, lo: 0x90, hi: 0xba}, + {value: 0x8353, lo: 0xbd, hi: 0xbf}, + // Block 0x59, offset 0x274 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb7}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xba}, + // Block 0x5a, offset 0x282 + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5b, offset 0x284 + {value: 0x0015, lo: 0x80, hi: 
0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8752, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8b52, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5c, offset 0x28b + {value: 0x0012, lo: 0x80, hi: 0x8d}, + {value: 0x8f52, lo: 0x8e, hi: 0x8e}, + {value: 0x0012, lo: 0x8f, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5d, offset 0x28f + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x5e, offset 0x29b + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x5f, offset 0x29c + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x369a, lo: 0x96, hi: 0x96}, + {value: 0x374a, lo: 0x97, hi: 0x97}, + {value: 0x37fa, lo: 0x98, hi: 0x98}, + {value: 0x38aa, lo: 0x99, hi: 0x99}, + {value: 0x395a, lo: 0x9a, hi: 0x9a}, + {value: 0x3a0a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x3abb, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x60, offset 0x2a7 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x61, offset 0x2af + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x62, offset 0x2b7 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x2bd + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x64, offset 0x2be + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x65, offset 0x2cc + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0xa452, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x66, offset 0x2d1 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x67, offset 0x2d4 + {value: 0xa753, lo: 0xb6, hi: 0xb7}, + {value: 0xaa53, lo: 0xb8, hi: 0xb9}, + {value: 0xad53, lo: 0xba, 
hi: 0xbb}, + {value: 0xaa53, lo: 0xbc, hi: 0xbd}, + {value: 0xa753, lo: 0xbe, hi: 0xbf}, + // Block 0x68, offset 0x2d9 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xb053, lo: 0xa0, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x69, offset 0x2dd + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6a, offset 0x2e3 + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e8 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6c, offset 0x2eb + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6d, offset 0x2f0 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x6e, offset 0x2f5 + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x6f, offset 0x2f6 + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x70, offset 0x2fc + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x71, offset 0x2fe + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x72, offset 0x2ff + {value: 0x0010, lo: 0x85, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x73, offset 0x301 + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x74, offset 0x303 + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x75, offset 0x306 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x76, offset 0x307 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x77, offset 0x309 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x78, offset 0x30c + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x79, offset 0x312 + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7a, offset 0x316 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7b, offset 0x318 + {value: 0x0004, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7c, offset 0x31d + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, 
hi: 0xbc}, + {value: 0x8753, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7d, offset 0x324 + {value: 0x0117, lo: 0x80, hi: 0x83}, + {value: 0x6553, lo: 0x84, hi: 0x84}, + {value: 0x908b, lo: 0x85, hi: 0x85}, + {value: 0x8f53, lo: 0x86, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0117, lo: 0x90, hi: 0x91}, + {value: 0x0012, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x95, hi: 0x95}, + {value: 0x0117, lo: 0x96, hi: 0x99}, + {value: 0x0015, lo: 0xb2, hi: 0xb4}, + {value: 0x0316, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x7e, offset 0x334 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + // Block 0x7f, offset 0x33e + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x80, offset 0x33f + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x348 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x82, offset 0x34c + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x83, offset 0x351 + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x84, offset 0x359 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x85, offset 0x35f + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x86, offset 0x365 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x87, offset 0x36f + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 
0x88, offset 0x374 + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x89, offset 0x37d + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8a, offset 0x383 + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xb352, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa8}, + {value: 0x0015, lo: 0xa9, hi: 0xa9}, + {value: 0x0004, lo: 0xaa, hi: 0xab}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8b, offset 0x38c + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x390 + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8d, offset 0x398 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x39a + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x8f, offset 0x39c + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x90, offset 0x39f + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x91, offset 0x3a1 + {value: 0x0004, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x92, offset 0x3a3 + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x93, offset 0x3a4 + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x94, offset 0x3a5 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x95, offset 0x3a7 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x96, offset 0x3a9 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x97, offset 0x3af + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x98, offset 0x3b4 + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x99, offset 0x3b6 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x3bd + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9b, offset 0x3c0 + 
{value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9c, offset 0x3c2 + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9d, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x3cd + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0x9f, offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa0, offset 0x3d0 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa1, offset 0x3d1 + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa2, offset 0x3d2 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x3d4 + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa4, offset 0x3d6 + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xad, hi: 0xbf}, + // Block 0xa5, offset 0x3d8 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa6, offset 0x3db + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa7, offset 0x3dd + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xa8, offset 0x3e0 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xb653, lo: 0x98, hi: 0x9f}, + {value: 0xb953, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xa9, offset 0x3e8 + {value: 0xb652, lo: 0x80, hi: 0x87}, + {value: 0xb952, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xaa, offset 0x3eb + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb953, lo: 0xb0, hi: 0xb7}, + {value: 0xb653, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3ef + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb952, lo: 0x98, hi: 0x9f}, + {value: 0xb652, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xac, offset 0x3f7 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xad, offset 0x3f9 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0xbc53, lo: 0xb0, hi: 0xb0}, + {value: 0xbf53, lo: 0xb1, hi: 0xb1}, + {value: 0xc253, lo: 0xb2, hi: 0xb2}, + {value: 0xbf53, lo: 0xb3, hi: 0xb3}, + {value: 0xc553, lo: 0xb4, hi: 0xb4}, + {value: 0xbf53, lo: 0xb5, hi: 0xb5}, + {value: 0xc253, lo: 0xb6, hi: 0xb6}, + {value: 0xbf53, lo: 0xb7, hi: 0xb7}, + {value: 0xbc53, lo: 0xb8, hi: 0xb8}, + {value: 0xc853, lo: 0xb9, hi: 0xb9}, + {value: 0xcb53, lo: 0xba, hi: 0xba}, + {value: 0xce53, lo: 0xbc, hi: 0xbc}, + {value: 0xc853, lo: 0xbd, hi: 0xbd}, + {value: 0xcb53, lo: 0xbe, hi: 0xbe}, + {value: 0xc853, lo: 0xbf, hi: 0xbf}, + // Block 0xae, offset 0x409 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xaf, offset 0x40a + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 
0xa7}, + // Block 0xb0, offset 0x40c + {value: 0x0015, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0015, lo: 0x83, hi: 0x85}, + {value: 0x0015, lo: 0x87, hi: 0xb0}, + {value: 0x0015, lo: 0xb2, hi: 0xba}, + // Block 0xb1, offset 0x411 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb2, offset 0x417 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb3, offset 0x419 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb4, offset 0x41a + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb5, offset 0x41c + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb6, offset 0x41e + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb7, offset 0x420 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb5}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x42d + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xb9, offset 0x42e + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xba, offset 0x42f + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbb, offset 0x433 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbc, offset 0x435 + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbd, offset 0x436 + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbe, offset 0x437 + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xbf, offset 0x438 + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x439 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc1, offset 0x43c + {value: 0x0010, lo: 0x80, hi: 0xa9}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + // Block 0xc2, offset 0x43f + {value: 0x0034, lo: 0xbd, hi: 0xbf}, + // Block 0xc3, offset 0x440 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc4, offset 0x443 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x87}, + {value: 0x0024, lo: 0x88, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x8b}, + {value: 0x0024, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc5, offset 0x44a + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x82}, + {value: 0x0034, lo: 0x83, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xc6, offset 0x450 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xc7, offset 0x452 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, 
lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc8, offset 0x456 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc9, offset 0x45e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xca, offset 0x464 + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcb, offset 0x468 + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xcc, offset 0x46f + {value: 0x0010, lo: 0x84, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xcd, offset 0x473 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xce, offset 0x477 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x89, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xcf, offset 0x480 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xd0, offset 0x48a + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + // Block 0xd1, offset 0x48c + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xd2, offset 0x492 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd3, offset 0x498 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x4a2 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 
0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xd5, offset 0x4ac + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xd6, offset 0x4ae + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + // Block 0xd7, offset 0x4b7 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd8, offset 0x4bd + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd9, offset 0x4c3 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xda, offset 0x4c9 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xdb, offset 0x4cc + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xdc, offset 0x4d2 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xdd, offset 0x4d5 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + // Block 0xde, offset 0x4de + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xdf, offset 0x4df + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xe0, offset 0x4e6 + {value: 0x0010, lo: 0x80, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + // Block 0xe1, offset 0x4ea + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xe2, offset 0x4eb + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x4ee + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8c, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + 
{value: 0x0030, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xe4, offset 0x4f8 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0034, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xe5, offset 0x4fb + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xe6, offset 0x4fd + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0014, lo: 0x94, hi: 0x97}, + {value: 0x0014, lo: 0x9a, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0x9f}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + // Block 0xe7, offset 0x504 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x8a}, + {value: 0x0010, lo: 0x8b, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbb, hi: 0xbe}, + // Block 0xe8, offset 0x50c + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0014, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x98}, + {value: 0x0014, lo: 0x99, hi: 0x9b}, + {value: 0x0010, lo: 0x9c, hi: 0xbf}, + // Block 0xe9, offset 0x512 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0014, lo: 0x8a, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x99}, + {value: 0x0010, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xea, offset 0x519 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xeb, offset 0x51a + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xec, offset 0x520 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x523 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xee, offset 0x52b + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb6}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xef, offset 0x532 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa5}, + {value: 0x0010, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xbf}, + // Block 0xf0, offset 0x53c + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0014, lo: 0x90, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0x96}, + {value: 0x0034, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xf1, offset 0x544 + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 
0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + // Block 0xf2, offset 0x547 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xf3, offset 0x54c + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0030, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xf4, offset 0x550 + {value: 0x0010, lo: 0xb0, hi: 0xb0}, + // Block 0xf5, offset 0x551 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xf6, offset 0x552 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xf7, offset 0x553 + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xf8, offset 0x554 + {value: 0x0010, lo: 0x80, hi: 0xb0}, + // Block 0xf9, offset 0x555 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xbf}, + // Block 0xfa, offset 0x557 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x95}, + // Block 0xfb, offset 0x55a + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xfc, offset 0x55b + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xfd, offset 0x55e + {value: 0x0010, lo: 0x80, hi: 0xbe}, + // Block 0xfe, offset 0x55f + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xff, offset 0x562 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0x100, offset 0x564 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x101, offset 0x568 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0x102, offset 0x569 + {value: 0x2013, lo: 0x80, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xbf}, + // Block 0x103, offset 0x56b + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x104, offset 0x56e + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0x105, offset 0x570 + {value: 0x0014, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa3, hi: 0xa4}, + {value: 0x0030, lo: 0xb0, hi: 0xb1}, + // Block 0x106, offset 0x573 + {value: 0x0004, lo: 0xb0, hi: 0xb3}, + {value: 0x0004, lo: 0xb5, hi: 0xbb}, + {value: 0x0004, lo: 0xbd, hi: 0xbe}, + // Block 0x107, offset 0x576 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0x108, offset 0x578 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0x109, offset 0x57d + {value: 0x0014, lo: 0x80, hi: 0xad}, + {value: 0x0014, lo: 0xb0, hi: 0xbf}, + // Block 0x10a, offset 0x57f + {value: 0x0014, lo: 0x80, hi: 0x86}, + // Block 0x10b, offset 0x580 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0x10c, offset 0x585 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0x10d, offset 0x589 + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0x10e, 
offset 0x58a + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0x10f, offset 0x58d + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x110, offset 0x591 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0x111, offset 0x59c + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x112, offset 0x5a0 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0x113, offset 0x5a8 + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0x114, offset 0x5ad + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0x115, offset 0x5b1 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0x116, offset 0x5b4 + {value: 0x0012, lo: 0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0x117, offset 0x5b8 + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x118, offset 0x5bb + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0x119, offset 0x5be + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0x11a, offset 0x5c3 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0x11b, offset 0x5c7 + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x11c, offset 0x5cb + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0x11d, offset 0x5cf + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x11e, offset 0x5d3 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x11f, offset 0x5d5 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 
0x120, offset 0x5d7 + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x121, offset 0x5da + {value: 0x0012, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8a, hi: 0x8a}, + {value: 0x0012, lo: 0x8b, hi: 0x9e}, + {value: 0x0012, lo: 0xa5, hi: 0xaa}, + // Block 0x122, offset 0x5de + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + {value: 0x0015, lo: 0xb0, hi: 0xbf}, + // Block 0x123, offset 0x5e4 + {value: 0x0015, lo: 0x80, hi: 0xad}, + // Block 0x124, offset 0x5e5 + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + // Block 0x125, offset 0x5e6 + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + // Block 0x126, offset 0x5e9 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + // Block 0x127, offset 0x5eb + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xae}, + // Block 0x128, offset 0x5ed + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0024, lo: 0xac, hi: 0xaf}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x129, offset 0x5f0 + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x12a, offset 0x5f5 + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xab}, + {value: 0x0010, lo: 0xad, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xbe}, + // Block 0x12b, offset 0x5f9 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x12c, offset 0x5fb + {value: 0xd152, lo: 0x80, hi: 0x81}, + {value: 0xd452, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x12d, offset 0x601 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x12e, offset 0x60a + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x12f, offset 0x60f + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x130, offset 0x610 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x131, offset 0x613 + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x132, offset 0x614 + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x133, offset 0x615 + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x134, offset 0x616 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x135, offset 0x618 + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x136, offset 0x619 + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total table size 16093 bytes (15KiB); checksum: EE91C452 diff --git a/vendor/golang.org/x/text/cases/tables9.0.0.go b/vendor/golang.org/x/text/cases/tables9.0.0.go new file mode 100644 index 00000000..3aeb7be6 --- /dev/null +++ 
b/vendor/golang.org/x/text/cases/tables9.0.0.go @@ -0,0 +1,2215 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build !go1.10 + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +var xorData string = "" + // Size: 185 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" + + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a\x00\x02:" + + "\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&\x00\x01*" + + "\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 2068 bytes + "\x00\x12\x12μΜΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x09II\x13\x1bʼnʼNʼN\x11" + + "\x09sSS\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj" + + "\x12\x12njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x1bǰJ̌J̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10" + + "\x12DZDz\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x1bⱾⱾ\x10\x1bⱿⱿ\x10\x1bⱯⱯ\x10\x1bⱭⱭ\x10" + + "\x1bⱰⱰ\x10\x1bꞫꞫ\x10\x1bꞬꞬ\x10\x1bꞍꞍ\x10\x1bꞪꞪ\x10\x1bꞮꞮ\x10\x1bⱢⱢ\x10" + + "\x1bꞭꞭ\x10\x1bⱮⱮ\x10\x1bⱤⱤ\x10\x1bꞱꞱ\x10\x1bꞲꞲ\x10\x1bꞰꞰ2\x12ιΙΙ\x166ΐ" + + "Ϊ́Ϊ́\x166ΰΫ́Ϋ́\x12\x12σΣΣ\x12\x12βΒΒ\x12\x12θΘΘ\x12\x12φΦΦ\x12" + + "\x12πΠΠ\x12\x12κΚΚ\x12\x12ρΡΡ\x12\x12εΕΕ\x14$եւԵՒԵւ\x12\x12вВВ\x12\x12дД" + + "Д\x12\x12оОО\x12\x12сСС\x12\x12тТТ\x12\x12тТТ\x12\x12ъЪЪ\x12\x12ѣѢѢ\x13" + + "\x1bꙋꙊꙊ\x13\x1bẖH̱H̱\x13\x1bẗT̈T̈\x13\x1bẘW̊W̊\x13\x1bẙY̊Y̊\x13\x1ba" + + "ʾAʾAʾ\x13\x1bṡṠṠ\x12\x10ssß\x14$ὐΥ̓Υ̓\x166ὒΥ̓̀Υ̓̀\x166ὔΥ̓́Υ̓́\x166" + + "ὖΥ̓͂Υ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ" + + "\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ" + + "\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ" + + "\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨ" + + "Ι\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15" + + "\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ" + + "\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ" + + "\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰι" + + "ᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ\x14$ᾶΑ͂Α͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12" + + "\x12ιΙΙ\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14$ῆΗ͂Η͂\x166ῆιΗ͂Ιῌ͂\x14\x1c" + + "ηιῃΗΙ\x166ῒΪ̀Ϊ̀\x166ΐΪ́Ϊ́\x14$ῖΙ͂Ι͂\x166ῗΪ͂Ϊ͂\x166ῢΫ̀Ϋ" + + "̀\x166ΰΫ́Ϋ́\x14$ῤΡ̓Ρ̓\x14$ῦΥ͂Υ͂\x166ῧΫ͂Ϋ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙ" + + "ῼ\x14$ώιΏΙΏͅ\x14$ῶΩ͂Ω͂\x166ῶιΩ͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk" + + "\x12\x10åå\x12\x10ɫɫ\x12\x10ɽɽ\x10\x12ȺȺ\x10\x12ȾȾ\x12\x10ɑɑ\x12\x10ɱɱ" + + "\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ\x12\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ" + + "\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ\x12\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFF" + + "Ff\x12\x12fiFIFi\x12\x12flFLFl\x13\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12" + + "stSTSt\x12\x12stSTSt\x14$մնՄՆՄն\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄ" + + "ԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. 
len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 11742 bytes (11.47 KiB). Checksum: 795fe57ee5135873. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 18: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 18 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 20 blocks, 1280 entries, 2560 bytes +// The third block is the zero block. +var caseValues = [1280]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x02db, 0x105: 
0x0359, + 0x106: 0x03da, 0x107: 0x043b, 0x108: 0x04b9, 0x109: 0x053a, 0x10a: 0x059b, 0x10b: 0x0619, + 0x10c: 0x069a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x06fa, 0x131: 0x07ab, 0x132: 0x0829, 0x133: 0x08aa, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x0a8a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x0b0a, 0x151: 0x0b8a, + 0x152: 0x0c0a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x0c8a, 0x15d: 0x0012, + 0x15e: 0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x0d0a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x0d8a, 0x166: 0x0e0a, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x0e8a, 0x16b: 0x0f0a, 0x16c: 0x0f8a, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x100a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x108a, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x110a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x118a, + 0x19e: 0x120a, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x128d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 
0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x130a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x144a, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x158a, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x160a, 0x251: 0x168a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x170a, 0x256: 0x178a, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x180a, 0x271: 0x188a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x190a, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x0812, 0x281: 0x0812, 0x282: 0x0812, 0x283: 0x0812, 0x284: 0x0812, 0x285: 0x0812, + 0x288: 0x0813, 0x289: 0x0813, 0x28a: 0x0813, 0x28b: 0x0813, + 0x28c: 0x0813, 0x28d: 0x0813, 0x290: 0x239a, 0x291: 0x0812, + 0x292: 0x247a, 0x293: 0x0812, 0x294: 0x25ba, 0x295: 0x0812, 0x296: 0x26fa, 0x297: 0x0812, + 0x299: 0x0813, 0x29b: 0x0813, 0x29d: 0x0813, + 0x29f: 0x0813, 0x2a0: 0x0812, 0x2a1: 0x0812, 0x2a2: 0x0812, 0x2a3: 0x0812, + 0x2a4: 0x0812, 0x2a5: 0x0812, 0x2a6: 0x0812, 0x2a7: 0x0812, 0x2a8: 0x0813, 0x2a9: 0x0813, + 0x2aa: 0x0813, 0x2ab: 0x0813, 0x2ac: 0x0813, 0x2ad: 0x0813, 0x2ae: 0x0813, 0x2af: 0x0813, + 0x2b0: 0x8b52, 0x2b1: 0x8b52, 0x2b2: 0x8e52, 0x2b3: 0x8e52, 0x2b4: 0x9152, 0x2b5: 0x9152, + 0x2b6: 0x9452, 0x2b7: 0x9452, 0x2b8: 0x9752, 0x2b9: 0x9752, 0x2ba: 0x9a52, 0x2bb: 0x9a52, + 0x2bc: 0x4d52, 0x2bd: 0x4d52, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x283a, 0x2c1: 0x292a, 0x2c2: 0x2a1a, 0x2c3: 0x2b0a, 0x2c4: 0x2bfa, 0x2c5: 0x2cea, + 0x2c6: 0x2dda, 0x2c7: 0x2eca, 0x2c8: 0x2fb9, 0x2c9: 0x30a9, 0x2ca: 0x3199, 0x2cb: 0x3289, + 0x2cc: 0x3379, 0x2cd: 0x3469, 0x2ce: 0x3559, 0x2cf: 0x3649, 0x2d0: 0x373a, 0x2d1: 0x382a, + 0x2d2: 0x391a, 0x2d3: 0x3a0a, 0x2d4: 0x3afa, 0x2d5: 0x3bea, 0x2d6: 0x3cda, 0x2d7: 0x3dca, + 0x2d8: 0x3eb9, 0x2d9: 0x3fa9, 0x2da: 0x4099, 0x2db: 0x4189, 
0x2dc: 0x4279, 0x2dd: 0x4369, + 0x2de: 0x4459, 0x2df: 0x4549, 0x2e0: 0x463a, 0x2e1: 0x472a, 0x2e2: 0x481a, 0x2e3: 0x490a, + 0x2e4: 0x49fa, 0x2e5: 0x4aea, 0x2e6: 0x4bda, 0x2e7: 0x4cca, 0x2e8: 0x4db9, 0x2e9: 0x4ea9, + 0x2ea: 0x4f99, 0x2eb: 0x5089, 0x2ec: 0x5179, 0x2ed: 0x5269, 0x2ee: 0x5359, 0x2ef: 0x5449, + 0x2f0: 0x0812, 0x2f1: 0x0812, 0x2f2: 0x553a, 0x2f3: 0x564a, 0x2f4: 0x571a, + 0x2f6: 0x57fa, 0x2f7: 0x58da, 0x2f8: 0x0813, 0x2f9: 0x0813, 0x2fa: 0x8b53, 0x2fb: 0x8b53, + 0x2fc: 0x5a19, 0x2fd: 0x0004, 0x2fe: 0x5aea, 0x2ff: 0x0004, + // Block 0xc, offset 0x300 + 0x300: 0x0004, 0x301: 0x0004, 0x302: 0x5b6a, 0x303: 0x5c7a, 0x304: 0x5d4a, + 0x306: 0x5e2a, 0x307: 0x5f0a, 0x308: 0x8e53, 0x309: 0x8e53, 0x30a: 0x9153, 0x30b: 0x9153, + 0x30c: 0x6049, 0x30d: 0x0004, 0x30e: 0x0004, 0x30f: 0x0004, 0x310: 0x0812, 0x311: 0x0812, + 0x312: 0x611a, 0x313: 0x625a, 0x316: 0x639a, 0x317: 0x647a, + 0x318: 0x0813, 0x319: 0x0813, 0x31a: 0x9453, 0x31b: 0x9453, 0x31d: 0x0004, + 0x31e: 0x0004, 0x31f: 0x0004, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0x65ba, 0x323: 0x66fa, + 0x324: 0x683a, 0x325: 0x0912, 0x326: 0x691a, 0x327: 0x69fa, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x9a53, 0x32b: 0x9a53, 0x32c: 0x0913, 0x32d: 0x0004, 0x32e: 0x0004, 0x32f: 0x0004, + 0x332: 0x6b3a, 0x333: 0x6c4a, 0x334: 0x6d1a, + 0x336: 0x6dfa, 0x337: 0x6eda, 0x338: 0x9753, 0x339: 0x9753, 0x33a: 0x4d53, 0x33b: 0x4d53, + 0x33c: 0x7019, 0x33d: 0x0004, 0x33e: 0x0004, + // Block 0xd, offset 0x340 + 0x342: 0x0013, + 0x347: 0x0013, 0x34a: 0x0012, 0x34b: 0x0013, + 0x34c: 0x0013, 0x34d: 0x0013, 0x34e: 0x0012, 0x34f: 0x0012, 0x350: 0x0013, 0x351: 0x0013, + 0x352: 0x0013, 0x353: 0x0012, 0x355: 0x0013, + 0x359: 0x0013, 0x35a: 0x0013, 0x35b: 0x0013, 0x35c: 0x0013, 0x35d: 0x0013, + 0x364: 0x0013, 0x366: 0x70eb, 0x368: 0x0013, + 0x36a: 0x714b, 0x36b: 0x718b, 0x36c: 0x0013, 0x36d: 0x0013, 0x36f: 0x0012, + 0x370: 0x0013, 0x371: 0x0013, 0x372: 0x9d53, 0x373: 0x0013, 0x374: 0x0012, 0x375: 0x0010, + 0x376: 0x0010, 0x377: 0x0010, 0x378: 0x0010, 0x379: 0x0012, + 0x37c: 0x0012, 0x37d: 0x0012, 0x37e: 0x0013, 0x37f: 0x0013, + // Block 0xe, offset 0x380 + 0x380: 0x1a13, 0x381: 0x1a13, 0x382: 0x1e13, 0x383: 0x1e13, 0x384: 0x1a13, 0x385: 0x1a13, + 0x386: 0x2613, 0x387: 0x2613, 0x388: 0x2a13, 0x389: 0x2a13, 0x38a: 0x2e13, 0x38b: 0x2e13, + 0x38c: 0x2a13, 0x38d: 0x2a13, 0x38e: 0x2613, 0x38f: 0x2613, 0x390: 0xa052, 0x391: 0xa052, + 0x392: 0xa352, 0x393: 0xa352, 0x394: 0xa652, 0x395: 0xa652, 0x396: 0xa352, 0x397: 0xa352, + 0x398: 0xa052, 0x399: 0xa052, 0x39a: 0x1a12, 0x39b: 0x1a12, 0x39c: 0x1e12, 0x39d: 0x1e12, + 0x39e: 0x1a12, 0x39f: 0x1a12, 0x3a0: 0x2612, 0x3a1: 0x2612, 0x3a2: 0x2a12, 0x3a3: 0x2a12, + 0x3a4: 0x2e12, 0x3a5: 0x2e12, 0x3a6: 0x2a12, 0x3a7: 0x2a12, 0x3a8: 0x2612, 0x3a9: 0x2612, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x6552, 0x3c1: 0x6552, 0x3c2: 0x6552, 0x3c3: 0x6552, 0x3c4: 0x6552, 0x3c5: 0x6552, + 0x3c6: 0x6552, 0x3c7: 0x6552, 0x3c8: 0x6552, 0x3c9: 0x6552, 0x3ca: 0x6552, 0x3cb: 0x6552, + 0x3cc: 0x6552, 0x3cd: 0x6552, 0x3ce: 0x6552, 0x3cf: 0x6552, 0x3d0: 0xa952, 0x3d1: 0xa952, + 0x3d2: 0xa952, 0x3d3: 0xa952, 0x3d4: 0xa952, 0x3d5: 0xa952, 0x3d6: 0xa952, 0x3d7: 0xa952, + 0x3d8: 0xa952, 0x3d9: 0xa952, 0x3da: 0xa952, 0x3db: 0xa952, 0x3dc: 0xa952, 0x3dd: 0xa952, + 0x3de: 0xa952, 0x3e0: 0x0113, 0x3e1: 0x0112, 0x3e2: 0x71eb, 0x3e3: 0x8853, + 0x3e4: 0x724b, 0x3e5: 0x72aa, 0x3e6: 0x730a, 0x3e7: 0x0f13, 0x3e8: 0x0f12, 0x3e9: 0x0313, + 0x3ea: 0x0312, 0x3eb: 0x0713, 0x3ec: 0x0712, 0x3ed: 0x736b, 0x3ee: 0x73cb, 0x3ef: 0x742b, + 0x3f0: 0x748b, 0x3f1: 0x0012, 0x3f2: 0x0113, 0x3f3: 
0x0112, 0x3f4: 0x0012, 0x3f5: 0x0313, + 0x3f6: 0x0312, 0x3f7: 0x0012, 0x3f8: 0x0012, 0x3f9: 0x0012, 0x3fa: 0x0012, 0x3fb: 0x0012, + 0x3fc: 0x0015, 0x3fd: 0x0015, 0x3fe: 0x74eb, 0x3ff: 0x754b, + // Block 0x10, offset 0x400 + 0x400: 0x0113, 0x401: 0x0112, 0x402: 0x0113, 0x403: 0x0112, 0x404: 0x0113, 0x405: 0x0112, + 0x406: 0x0113, 0x407: 0x0112, 0x408: 0x0014, 0x409: 0x0004, 0x40a: 0x0004, 0x40b: 0x0713, + 0x40c: 0x0712, 0x40d: 0x75ab, 0x40e: 0x0012, 0x40f: 0x0010, 0x410: 0x0113, 0x411: 0x0112, + 0x412: 0x0113, 0x413: 0x0112, 0x414: 0x0012, 0x415: 0x0012, 0x416: 0x0113, 0x417: 0x0112, + 0x418: 0x0113, 0x419: 0x0112, 0x41a: 0x0113, 0x41b: 0x0112, 0x41c: 0x0113, 0x41d: 0x0112, + 0x41e: 0x0113, 0x41f: 0x0112, 0x420: 0x0113, 0x421: 0x0112, 0x422: 0x0113, 0x423: 0x0112, + 0x424: 0x0113, 0x425: 0x0112, 0x426: 0x0113, 0x427: 0x0112, 0x428: 0x0113, 0x429: 0x0112, + 0x42a: 0x760b, 0x42b: 0x766b, 0x42c: 0x76cb, 0x42d: 0x772b, 0x42e: 0x778b, + 0x430: 0x77eb, 0x431: 0x784b, 0x432: 0x78ab, 0x433: 0xac53, 0x434: 0x0113, 0x435: 0x0112, + 0x436: 0x0113, 0x437: 0x0112, + // Block 0x11, offset 0x440 + 0x440: 0x790a, 0x441: 0x798a, 0x442: 0x7a0a, 0x443: 0x7a8a, 0x444: 0x7b3a, 0x445: 0x7bea, + 0x446: 0x7c6a, + 0x453: 0x7cea, 0x454: 0x7dca, 0x455: 0x7eaa, 0x456: 0x7f8a, 0x457: 0x806a, + 0x45d: 0x0010, + 0x45e: 0x0034, 0x45f: 0x0010, 0x460: 0x0010, 0x461: 0x0010, 0x462: 0x0010, 0x463: 0x0010, + 0x464: 0x0010, 0x465: 0x0010, 0x466: 0x0010, 0x467: 0x0010, 0x468: 0x0010, + 0x46a: 0x0010, 0x46b: 0x0010, 0x46c: 0x0010, 0x46d: 0x0010, 0x46e: 0x0010, 0x46f: 0x0010, + 0x470: 0x0010, 0x471: 0x0010, 0x472: 0x0010, 0x473: 0x0010, 0x474: 0x0010, 0x475: 0x0010, + 0x476: 0x0010, 0x478: 0x0010, 0x479: 0x0010, 0x47a: 0x0010, 0x47b: 0x0010, + 0x47c: 0x0010, 0x47e: 0x0010, + // Block 0x12, offset 0x480 + 0x480: 0x2213, 0x481: 0x2213, 0x482: 0x2613, 0x483: 0x2613, 0x484: 0x2213, 0x485: 0x2213, + 0x486: 0x2e13, 0x487: 0x2e13, 0x488: 0x2213, 0x489: 0x2213, 0x48a: 0x2613, 0x48b: 0x2613, + 0x48c: 0x2213, 0x48d: 0x2213, 0x48e: 0x3e13, 0x48f: 0x3e13, 0x490: 0x2213, 0x491: 0x2213, + 0x492: 0x2613, 0x493: 0x2613, 0x494: 0x2213, 0x495: 0x2213, 0x496: 0x2e13, 0x497: 0x2e13, + 0x498: 0x2213, 0x499: 0x2213, 0x49a: 0x2613, 0x49b: 0x2613, 0x49c: 0x2213, 0x49d: 0x2213, + 0x49e: 0xb553, 0x49f: 0xb553, 0x4a0: 0xb853, 0x4a1: 0xb853, 0x4a2: 0x2212, 0x4a3: 0x2212, + 0x4a4: 0x2612, 0x4a5: 0x2612, 0x4a6: 0x2212, 0x4a7: 0x2212, 0x4a8: 0x2e12, 0x4a9: 0x2e12, + 0x4aa: 0x2212, 0x4ab: 0x2212, 0x4ac: 0x2612, 0x4ad: 0x2612, 0x4ae: 0x2212, 0x4af: 0x2212, + 0x4b0: 0x3e12, 0x4b1: 0x3e12, 0x4b2: 0x2212, 0x4b3: 0x2212, 0x4b4: 0x2612, 0x4b5: 0x2612, + 0x4b6: 0x2212, 0x4b7: 0x2212, 0x4b8: 0x2e12, 0x4b9: 0x2e12, 0x4ba: 0x2212, 0x4bb: 0x2212, + 0x4bc: 0x2612, 0x4bd: 0x2612, 0x4be: 0x2212, 0x4bf: 0x2212, + // Block 0x13, offset 0x4c0 + 0x4c2: 0x0010, + 0x4c7: 0x0010, 0x4c9: 0x0010, 0x4cb: 0x0010, + 0x4cd: 0x0010, 0x4ce: 0x0010, 0x4cf: 0x0010, 0x4d1: 0x0010, + 0x4d2: 0x0010, 0x4d4: 0x0010, 0x4d7: 0x0010, + 0x4d9: 0x0010, 0x4db: 0x0010, 0x4dd: 0x0010, + 0x4df: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, + 0x4e4: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, 0x4e9: 0x0010, + 0x4ea: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f7: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x12, 0xc3: 0x13, 0xc4: 0x14, 0xc5: 0x15, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x16, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x17, 0xcc: 0x18, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x19, 0xd1: 0x1a, 0xd2: 0x1b, 0xd3: 0x1c, 0xd4: 0x1d, 0xd5: 0x1e, 0xd6: 0x1f, 0xd7: 0x20, + 0xd8: 0x21, 0xd9: 0x22, 0xda: 0x23, 0xdb: 0x24, 0xdc: 0x25, 0xdd: 0x26, 0xde: 0x27, 0xdf: 0x28, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x29, 0x121: 0x2a, 0x122: 0x2b, 0x123: 0x2c, 0x124: 0x2d, 0x125: 0x2e, 0x126: 0x2f, 0x127: 0x30, + 0x128: 0x31, 0x129: 0x32, 0x12a: 0x33, 0x12b: 0x34, 0x12c: 0x35, 0x12d: 0x36, 0x12e: 0x37, 0x12f: 0x38, + 0x130: 0x39, 0x131: 0x3a, 0x132: 0x3b, 0x133: 0x3c, 0x134: 0x3d, 0x135: 0x3e, 0x136: 0x3f, 0x137: 0x40, + 0x138: 0x41, 0x139: 0x42, 0x13a: 0x43, 0x13b: 0x44, 0x13c: 0x45, 0x13d: 0x46, 0x13e: 0x47, 0x13f: 0x48, + // Block 0x5, offset 0x140 + 0x140: 0x49, 0x141: 0x4a, 0x142: 0x4b, 0x143: 0x4c, 0x144: 0x23, 0x145: 0x23, 0x146: 0x23, 0x147: 0x23, + 0x148: 0x23, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x23, 0x152: 0x23, 0x153: 0x23, 0x154: 0x23, 0x155: 0x23, 0x156: 0x23, 0x157: 0x23, + 0x158: 0x23, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x08, 0x17e: 0x09, 0x17f: 0x0a, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0b, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0c, + 0x1b0: 0x7c, 0x1b1: 0x0d, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x23, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x23, 0x202: 0x23, 0x203: 0x23, 0x204: 0x23, 0x205: 0x23, 0x206: 0x23, 0x207: 0x23, + 0x208: 0x23, 0x209: 0x23, 0x20a: 0x23, 0x20b: 0x23, 0x20c: 0x23, 0x20d: 0x23, 0x20e: 0x23, 0x20f: 0x23, + 0x210: 0x23, 0x211: 0x23, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x23, 0x215: 0x23, 0x216: 0x23, 0x217: 0x23, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x0e, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x23, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x23, 0x231: 0x23, 0x232: 0x23, 0x233: 0x23, 0x234: 0x23, 0x235: 0x23, 0x236: 0x23, 0x237: 0x23, + 0x238: 0x23, 0x239: 0x23, 0x23a: 0x23, 0x23b: 0x23, 0x23c: 0x23, 0x23d: 0x23, 0x23e: 0x23, 0x23f: 0x23, + // Block 0x9, offset 0x240 + 0x240: 0x23, 0x241: 0x23, 0x242: 0x23, 0x243: 0x23, 0x244: 0x23, 0x245: 0x23, 0x246: 0x23, 0x247: 0x23, + 0x248: 0x23, 0x249: 0x23, 0x24a: 0x23, 0x24b: 0x23, 0x24c: 0x23, 0x24d: 0x23, 0x24e: 0x23, 0x24f: 0x23, + 0x250: 0x23, 0x251: 0x23, 0x252: 0x23, 0x253: 0x23, 0x254: 0x23, 0x255: 0x23, 0x256: 0x23, 0x257: 
0x23, + 0x258: 0x23, 0x259: 0x23, 0x25a: 0x23, 0x25b: 0x23, 0x25c: 0x23, 0x25d: 0x23, 0x25e: 0x23, 0x25f: 0x23, + 0x260: 0x23, 0x261: 0x23, 0x262: 0x23, 0x263: 0x23, 0x264: 0x23, 0x265: 0x23, 0x266: 0x23, 0x267: 0x23, + 0x268: 0x23, 0x269: 0x23, 0x26a: 0x23, 0x26b: 0x23, 0x26c: 0x23, 0x26d: 0x23, 0x26e: 0x23, 0x26f: 0x23, + 0x270: 0x23, 0x271: 0x23, 0x272: 0x23, 0x273: 0x23, 0x274: 0x23, 0x275: 0x23, 0x276: 0x23, 0x277: 0x23, + 0x278: 0x23, 0x279: 0x23, 0x27a: 0x23, 0x27b: 0x23, 0x27c: 0x23, 0x27d: 0x23, 0x27e: 0x23, 0x27f: 0x23, + // Block 0xa, offset 0x280 + 0x280: 0x23, 0x281: 0x23, 0x282: 0x23, 0x283: 0x23, 0x284: 0x23, 0x285: 0x23, 0x286: 0x23, 0x287: 0x23, + 0x288: 0x23, 0x289: 0x23, 0x28a: 0x23, 0x28b: 0x23, 0x28c: 0x23, 0x28d: 0x23, 0x28e: 0x23, 0x28f: 0x23, + 0x290: 0x23, 0x291: 0x23, 0x292: 0x23, 0x293: 0x23, 0x294: 0x23, 0x295: 0x23, 0x296: 0x23, 0x297: 0x23, + 0x298: 0x23, 0x299: 0x23, 0x29a: 0x23, 0x29b: 0x23, 0x29c: 0x23, 0x29d: 0x23, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x0f, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x23, 0x2f1: 0x23, 0x2f2: 0x23, 0x2f3: 0x23, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x23, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x23, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x23, 0x319: 0x23, 0x31a: 0x23, 0x31b: 0x23, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x23, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, + // Block 0xd, offset 0x340 + 0x340: 0xd3, 0x341: 0xd4, 0x342: 0xd5, 0x343: 0xd6, 0x344: 0xd7, 0x345: 0xd8, 0x346: 0xd9, 0x347: 0xda, + 0x348: 0xdb, 0x34a: 0xdc, 0x34b: 0xdd, 0x34c: 0xde, 0x34d: 0xdf, + 0x350: 0xe0, 0x351: 0xe1, 0x352: 0xe2, 0x353: 0xe3, 0x356: 0xe4, 0x357: 0xe5, + 0x358: 0xe6, 0x359: 0xe7, 0x35a: 0xe8, 0x35b: 0xe9, 0x35c: 0xea, + 0x362: 0xeb, 0x363: 0xec, + 0x36b: 0xed, + 0x370: 0xee, 0x371: 0xef, 0x372: 0xf0, + // Block 0xe, offset 0x380 + 0x380: 0x23, 0x381: 0x23, 0x382: 0x23, 0x383: 0x23, 0x384: 0x23, 0x385: 0x23, 0x386: 0x23, 0x387: 0x23, + 0x388: 0x23, 0x389: 0x23, 0x38a: 0x23, 0x38b: 0x23, 0x38c: 0x23, 0x38d: 0x23, 0x38e: 0xf1, + 0x390: 0x23, 0x391: 0xf2, 0x392: 0x23, 0x393: 0x23, 0x394: 0x23, 0x395: 0xf3, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x23, 0x3c1: 0x23, 0x3c2: 0x23, 0x3c3: 0x23, 0x3c4: 0x23, 0x3c5: 0x23, 0x3c6: 0x23, 0x3c7: 0x23, + 0x3c8: 0x23, 0x3c9: 0x23, 0x3ca: 0x23, 0x3cb: 0x23, 0x3cc: 0x23, 0x3cd: 0x23, 0x3ce: 0x23, 0x3cf: 0x23, + 0x3d0: 0xf2, + // Block 0x10, offset 0x400 + 0x410: 0x23, 0x411: 0x23, 0x412: 0x23, 0x413: 0x23, 0x414: 0x23, 0x415: 0x23, 0x416: 0x23, 0x417: 0x23, + 0x418: 0x23, 0x419: 0xf4, + // Block 0x11, offset 0x440 + 0x460: 0x23, 0x461: 0x23, 0x462: 0x23, 0x463: 0x23, 0x464: 0x23, 0x465: 0x23, 0x466: 0x23, 0x467: 0x23, + 0x468: 0xed, 0x469: 0xf5, 0x46b: 0xf6, 0x46c: 0xf7, 0x46d: 0xf8, 0x46e: 0xf9, + 0x47c: 0x23, 0x47d: 0xfa, 0x47e: 0xfb, 0x47f: 0xfc, + // Block 0x12, offset 0x480 + 0x4b0: 0x23, 0x4b1: 0xfd, 0x4b2: 0xfe, + // Block 0x13, offset 0x4c0 + 0x4c5: 0xff, 0x4c6: 0x100, + 0x4c9: 0x101, + 0x4d0: 0x102, 0x4d1: 0x103, 0x4d2: 0x104, 0x4d3: 0x105, 0x4d4: 0x106, 0x4d5: 0x107, 0x4d6: 0x108, 0x4d7: 0x109, + 
0x4d8: 0x10a, 0x4d9: 0x10b, 0x4da: 0x10c, 0x4db: 0x10d, 0x4dc: 0x10e, 0x4dd: 0x10f, 0x4de: 0x110, 0x4df: 0x111, + 0x4e8: 0x112, 0x4e9: 0x113, 0x4ea: 0x114, + // Block 0x14, offset 0x500 + 0x500: 0x115, + 0x520: 0x23, 0x521: 0x23, 0x522: 0x23, 0x523: 0x116, 0x524: 0x10, 0x525: 0x117, + 0x538: 0x118, 0x539: 0x11, 0x53a: 0x119, + // Block 0x15, offset 0x540 + 0x544: 0x11a, 0x545: 0x11b, 0x546: 0x11c, + 0x54f: 0x11d, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x11e, 0x5c1: 0x11f, 0x5c4: 0x11f, 0x5c5: 0x11f, 0x5c6: 0x11f, 0x5c7: 0x120, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 272 entries, 544 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x3a, 0x3d, 0x41, 0x44, 0x48, 0x52, 0x54, 0x59, 0x69, 0x70, 0x75, 0x83, 0x84, 0x92, 0xa1, 0xab, 0xae, 0xb4, 0xbc, 0xbe, 0xc0, 0xce, 0xd4, 0xe2, 0xed, 0xf8, 0x103, 0x10f, 0x119, 0x124, 0x12f, 0x13b, 0x147, 0x14f, 0x157, 0x161, 0x16c, 0x178, 0x17e, 0x189, 0x18e, 0x196, 0x199, 0x19e, 0x1a2, 0x1a6, 0x1ad, 0x1b6, 0x1be, 0x1bf, 0x1c8, 0x1cf, 0x1d7, 0x1dd, 0x1e3, 0x1e8, 0x1ec, 0x1ef, 0x1f1, 0x1f4, 0x1f9, 0x1fa, 0x1fc, 0x1fe, 0x200, 0x207, 0x20c, 0x210, 0x219, 0x21c, 0x21f, 0x225, 0x226, 0x231, 0x232, 0x233, 0x238, 0x245, 0x24d, 0x255, 0x25e, 0x267, 0x270, 0x275, 0x278, 0x281, 0x28e, 0x290, 0x297, 0x299, 0x2a4, 0x2a5, 0x2b0, 0x2b8, 0x2c0, 0x2c6, 0x2c7, 0x2d5, 0x2da, 0x2dd, 0x2e2, 0x2e6, 0x2ec, 0x2f1, 0x2f4, 0x2f9, 0x2fe, 0x2ff, 0x305, 0x307, 0x308, 0x30a, 0x30c, 0x30f, 0x310, 0x312, 0x315, 0x31b, 0x31f, 0x321, 0x327, 0x32e, 0x332, 0x33b, 0x33c, 0x344, 0x348, 0x34d, 0x355, 0x35b, 0x361, 0x36b, 0x370, 0x379, 0x37f, 0x386, 0x38a, 0x392, 0x394, 0x396, 0x399, 0x39b, 0x39d, 0x39e, 0x39f, 0x3a1, 0x3a3, 0x3a9, 0x3ae, 0x3b0, 0x3b6, 0x3b9, 0x3bb, 0x3c1, 0x3c6, 0x3c8, 0x3c9, 0x3ca, 0x3cb, 0x3cd, 0x3cf, 0x3d1, 0x3d4, 0x3d6, 0x3d9, 0x3e1, 0x3e4, 0x3e8, 0x3f0, 0x3f2, 0x3f3, 0x3f4, 0x3f6, 0x3fc, 0x3fe, 0x3ff, 0x401, 0x403, 0x405, 0x412, 0x413, 0x414, 0x418, 0x41a, 0x41b, 0x41c, 0x41d, 0x41e, 0x422, 0x426, 0x42c, 0x42e, 0x435, 0x438, 0x43c, 0x442, 0x44b, 0x451, 0x457, 0x461, 0x46b, 0x46d, 0x474, 0x47a, 0x480, 0x486, 0x489, 0x48f, 0x492, 0x49a, 0x49b, 0x4a2, 0x4a3, 0x4a6, 0x4a7, 0x4ad, 0x4b0, 0x4b8, 0x4b9, 0x4ba, 0x4bb, 0x4bc, 0x4be, 0x4c0, 0x4c2, 0x4c6, 0x4c7, 0x4c9, 0x4ca, 0x4cb, 0x4cd, 0x4d2, 0x4d7, 0x4db, 0x4dc, 0x4df, 0x4e3, 0x4ee, 0x4f2, 0x4fa, 0x4ff, 0x503, 0x506, 0x50a, 0x50d, 0x510, 0x515, 0x519, 0x51d, 0x521, 0x525, 0x527, 0x529, 0x52c, 0x531, 0x533, 0x538, 0x541, 0x546, 0x547, 0x54a, 0x54b, 0x54c, 0x54e, 0x54f, 0x550} + +// sparseValues: 1360 entries, 5440 bytes +var sparseValues = [1360]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x001a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x009a, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x011b, lo: 0xb0, hi: 0xb0}, + {value: 0x019a, lo: 0xb1, hi: 0xb1}, 
+ {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x01da, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x028a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x090b, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x098b, lo: 0xbe, hi: 0xbe}, + {value: 0x0a0a, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0004, lo: 0x82, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x91}, + {value: 0x0004, lo: 0x92, hi: 0x96}, + {value: 0x0054, lo: 0x97, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xac}, + {value: 0x0004, lo: 0xad, hi: 0xad}, + {value: 0x0014, lo: 0xae, hi: 0xae}, + {value: 0x0004, lo: 0xaf, hi: 0xbf}, + // Block 0x6, offset 0x3a + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x3d + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x41 + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x44 + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x48 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x52 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x54 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x59 + {value: 0x6852, lo: 0x80, hi: 0x86}, + {value: 0x198a, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0024, lo: 0x92, hi: 0x95}, + {value: 0x0034, lo: 0x96, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x99}, + {value: 0x0034, lo: 0x9a, hi: 0x9b}, + {value: 0x0024, lo: 0x9c, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa7}, + {value: 0x0024, lo: 0xa8, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 
0x0034, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xbd}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x69 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xf, offset 0x70 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x10, offset 0x75 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x11, offset 0x83 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x12, offset 0x84 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x92 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x14, offset 0xa1 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x15, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x16, offset 0xae + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + // Block 0x17, offset 0xb4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, 
+ {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x18, offset 0xbc + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + // Block 0x19, offset 0xbe + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x1a, offset 0xc0 + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1b, offset 0xce + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1c, offset 0xd4 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1d, offset 0xe2 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0xed + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + // Block 0x1f, offset 0xf8 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x20, offset 0x103 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 
0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x21, offset 0x10f + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x22, offset 0x119 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + // Block 0x23, offset 0x124 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x12f + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x26, offset 0x147 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x27, offset 0x14f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x28, offset 0x157 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x29, offset 0x161 + {value: 0x0010, 
lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x2a, offset 0x16c + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2b, offset 0x178 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2c, offset 0x17e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2d, offset 0x189 + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2e, offset 0x18e + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2f, offset 0x196 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x30, offset 0x199 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x31, offset 0x19e + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x32, offset 0x1a2 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x33, offset 0x1a6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x34, offset 0x1ad + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + 
{value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x1b6 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x36, offset 0x1be + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x37, offset 0x1bf + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x38, offset 0x1c8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x39, offset 0x1cf + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d7 + {value: 0x7053, lo: 0x80, hi: 0x85}, + {value: 0x7053, lo: 0x87, hi: 0x87}, + {value: 0x7053, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x3b, offset 0x1dd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x1e3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3d, offset 0x1e8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3e, offset 0x1ec + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3f, offset 0x1ef + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x40, offset 0x1f1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x41, offset 0x1f4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x42, offset 0x1f9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x43, offset 0x1fa + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x44, offset 0x1fc + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x45, offset 0x1fe + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x46, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 
0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x47, offset 0x207 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x48, offset 0x20c + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x49, offset 0x210 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x4a, offset 0x219 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x4b, offset 0x21c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb7}, + // Block 0x4c, offset 0x21f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x225 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4e, offset 0x226 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4f, offset 0x231 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x50, offset 0x232 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x51, offset 0x233 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x52, offset 0x238 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x54, offset 0x24d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 
0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x255 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x56, offset 0x25e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x57, offset 0x267 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x58, offset 0x270 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x59, offset 0x275 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x5a, offset 0x278 + {value: 0x1a6a, lo: 0x80, hi: 0x80}, + {value: 0x1aea, lo: 0x81, hi: 0x81}, + {value: 0x1b6a, lo: 0x82, hi: 0x82}, + {value: 0x1bea, lo: 0x83, hi: 0x83}, + {value: 0x1c6a, lo: 0x84, hi: 0x84}, + {value: 0x1cea, lo: 0x85, hi: 0x85}, + {value: 0x1d6a, lo: 0x86, hi: 0x86}, + {value: 0x1dea, lo: 0x87, hi: 0x87}, + {value: 0x1e6a, lo: 0x88, hi: 0x88}, + // Block 0x5b, offset 0x281 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5c, offset 0x28e + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5d, offset 0x290 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8452, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8852, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x297 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5f, offset 0x299 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // 
Block 0x60, offset 0x2a4 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x2a5 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x1f1a, lo: 0x96, hi: 0x96}, + {value: 0x1fca, lo: 0x97, hi: 0x97}, + {value: 0x207a, lo: 0x98, hi: 0x98}, + {value: 0x212a, lo: 0x99, hi: 0x99}, + {value: 0x21da, lo: 0x9a, hi: 0x9a}, + {value: 0x228a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x233b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x62, offset 0x2b0 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x63, offset 0x2b8 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2c0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x2c6 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x66, offset 0x2c7 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x67, offset 0x2d5 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0x9d52, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x68, offset 0x2da + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x69, offset 0x2dd + {value: 0xa053, lo: 0xb6, hi: 0xb7}, + {value: 0xa353, lo: 0xb8, hi: 0xb9}, + {value: 0xa653, lo: 0xba, hi: 0xbb}, + {value: 0xa353, lo: 0xbc, hi: 0xbd}, + {value: 0xa053, lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x2e2 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xa953, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e6 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6c, offset 0x2ec + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6d, offset 0x2f1 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, 
offset 0x2f4 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6f, offset 0x2f9 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x70, offset 0x2fe + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x71, offset 0x2ff + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x72, offset 0x305 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x73, offset 0x307 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x74, offset 0x308 + {value: 0x0010, lo: 0x85, hi: 0xad}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x75, offset 0x30a + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x76, offset 0x30c + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x77, offset 0x30f + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x78, offset 0x310 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x79, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x7a, offset 0x315 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x31b + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7c, offset 0x31f + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7d, offset 0x321 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0x9f}, + {value: 0x0004, lo: 0xa0, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7e, offset 0x327 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8453, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7f, offset 0x32e + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x80, offset 0x332 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x81, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x82, offset 0x33c + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 
0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x83, offset 0x344 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x348 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x85, offset 0x34d + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x86, offset 0x355 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x87, offset 0x35b + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x88, offset 0x361 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x89, offset 0x36b + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x370 + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8b, offset 0x379 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37f + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xac52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x386 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x38a + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + 
{value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8f, offset 0x392 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x394 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x91, offset 0x396 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x92, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x93, offset 0x39b + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x94, offset 0x39d + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x95, offset 0x39e + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x96, offset 0x39f + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x97, offset 0x3a1 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x98, offset 0x3a3 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x99, offset 0x3a9 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x3ae + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3b0 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x3b6 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9d, offset 0x3b9 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9e, offset 0x3bb + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9f, offset 0x3c1 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x3c6 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa1, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa2, offset 0x3c9 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa3, offset 0x3ca + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa4, offset 0x3cb + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x3cd + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa6, offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xa7, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa8, offset 0x3d4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa9, offset 0x3d6 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 
0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xaa, offset 0x3d9 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xaf53, lo: 0x98, hi: 0x9f}, + {value: 0xb253, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e1 + {value: 0xaf52, lo: 0x80, hi: 0x87}, + {value: 0xb252, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xac, offset 0x3e4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb253, lo: 0xb0, hi: 0xb7}, + {value: 0xaf53, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x3e8 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb252, lo: 0x98, hi: 0x9f}, + {value: 0xaf52, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xae, offset 0x3f0 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xaf, offset 0x3f2 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xb0, offset 0x3f3 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb1, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb2, offset 0x3f6 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x3fc + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb4, offset 0x3fe + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb5, offset 0x3ff + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb6, offset 0x401 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb7, offset 0x403 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb8, offset 0x405 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb3}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x412 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xba, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xbb, offset 0x414 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbc, offset 0x418 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbd, offset 0x41a + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbe, offset 0x41b + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbf, offset 0x41c + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x41d + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc1, offset 0x41e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 
0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc2, offset 0x422 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x426 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc4, offset 0x42c + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc5, offset 0x42e + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc6, offset 0x435 + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc7, offset 0x438 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43c + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xc9, offset 0x442 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xca, offset 0x44b + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcb, offset 0x451 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcc, offset 0x457 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcd, offset 0x461 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xce, offset 0x46b + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xcf, offset 0x46d + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + 
{value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd0, offset 0x474 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x47a + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd2, offset 0x480 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x486 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd4, offset 0x489 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x48f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd6, offset 0x492 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd7, offset 0x49a + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd8, offset 0x49b + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd9, offset 0x4a2 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xda, offset 0x4a3 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdb, offset 0x4a6 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xdc, offset 0x4a7 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xdd, offset 0x4ad + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xde, offset 0x4b0 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xdf, offset 0x4b8 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe0, offset 0x4b9 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xe1, offset 0x4ba + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xe2, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xe3, 
offset 0x4bc + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe4, offset 0x4be + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xe5, offset 0x4c0 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xe6, offset 0x4c2 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xe7, offset 0x4c6 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xe8, offset 0x4c7 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xe9, offset 0x4c9 + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xea, offset 0x4ca + {value: 0x0014, lo: 0xa0, hi: 0xa0}, + // Block 0xeb, offset 0x4cb + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xec, offset 0x4cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xed, offset 0x4d2 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xee, offset 0x4d7 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xef, offset 0x4db + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xf0, offset 0x4dc + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xf1, offset 0x4df + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xf2, offset 0x4e3 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xf3, offset 0x4ee + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xf4, offset 0x4f2 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xf5, offset 0x4fa + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0xf6, offset 0x4ff + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0xf7, offset 0x503 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0xf8, offset 0x506 + {value: 0x0012, lo: 
0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0xf9, offset 0x50a + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0xfa, offset 0x50d + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xfb, offset 0x510 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0xfc, offset 0x515 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0xfd, offset 0x519 + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0xfe, offset 0x51d + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xff, offset 0x521 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x100, offset 0x525 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x101, offset 0x527 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x102, offset 0x529 + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x103, offset 0x52c + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x104, offset 0x531 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x105, offset 0x533 + {value: 0xb552, lo: 0x80, hi: 0x81}, + {value: 0xb852, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x106, offset 0x538 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x107, offset 0x541 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x108, offset 0x546 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x109, offset 0x547 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10a, offset 0x54a + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x10b, offset 0x54b + {value: 0x0004, lo: 0xbb, hi: 0xbf}, + // Block 0x10c, offset 0x54c + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x10d, offset 0x54e + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x10e, offset 0x54f + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total 
table size 14027 bytes (13KiB); checksum: F17D40E8 diff --git a/vendor/golang.org/x/text/cases/trieval.go b/vendor/golang.org/x/text/cases/trieval.go new file mode 100644 index 00000000..4e4d13fe --- /dev/null +++ b/vendor/golang.org/x/text/cases/trieval.go @@ -0,0 +1,217 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package cases + +// This file contains definitions for interpreting the trie value of the case +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds case information for a single rune. It is the value returned +// by a trie lookup. Most mapping information can be stored in a single 16-bit +// value. If not, for example when a rune is mapped to multiple runes, the value +// stores some basic case data and an index into an array with additional data. +// +// The per-rune values have the following format: +// +// if (exception) { +// 15..4 unsigned exception index +// } else { +// 15..8 XOR pattern or index to XOR pattern for case mapping +// Only 13..8 are used for XOR patterns. +// 7 inverseFold (fold to upper, not to lower) +// 6 index: interpret the XOR pattern as an index +// or isMid if case mode is cIgnorableUncased. +// 5..4 CCC: zero (normal or break), above or other +// } +// 3 exception: interpret this value as an exception index +// (TODO: is this bit necessary? Probably implied from case mode.) +// 2..0 case mode +// +// For the non-exceptional cases, a rune must be either uncased, lowercase or +// uppercase. If the rune is cased, the XOR pattern maps either a lowercase +// rune to uppercase or an uppercase rune to lowercase (applied to the 10 +// least-significant bits of the rune). +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + casedMask = 0x0003 + fullCasedMask = 0x0007 + ignorableMask = 0x0006 + ignorableValue = 0x0004 + + inverseFoldBit = 1 << 7 + isMidBit = 1 << 6 + + exceptionBit = 1 << 3 + exceptionShift = 4 + numExceptionBits = 12 + + xorIndexBit = 1 << 6 + xorShift = 8 + + // There is no mapping if all xor bits and the exception bit are zero. + hasMappingMask = 0xff80 | exceptionBit +) + +// The case mode bits encodes the case type of a rune. This includes uncased, +// title, upper and lower case and case ignorable. (For a definition of these +// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare +// cases, a rune can be both cased and case-ignorable. This is encoded by +// cIgnorableCased. A rune of this type is always lower case. Some runes are +// cased while not having a mapping. +// +// A common pattern for scripts in the Unicode standard is for upper and lower +// case runes to alternate for increasing rune values (e.g. the accented Latin +// ranges starting from U+0100 and U+1E00 among others and some Cyrillic +// characters). We use this property by defining a cXORCase mode, where the case +// mode (always upper or lower case) is derived from the rune value. As the XOR +// pattern for case mappings is often identical for successive runes, using +// cXORCase can result in large series of identical trie values. This, in turn, +// allows us to better compress the trie blocks. 
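(Illustrative aside, not part of the vendored diff: the comment above describes how a 16-bit trie value packs a case mode into bits 2..0 and, in the non-exception case, an XOR pattern into the upper byte that toggles a rune between lower and upper case. The sketch below re-declares just those pieces under that assumed bit layout and shows the pattern flipping 'a' to 'A'; the constant names mirror the const block that follows, and the sample trie value is made up for the example.)

```go
// Sketch of decoding a non-exception case-trie value, assuming the bit
// layout described in the comment above. Not part of the vendored file.
package main

import "fmt"

type info uint16

const (
	cUncased info = iota // 000
	cTitle               // 001
	cLower               // 010
	cUpper               // 011

	casedMask    = 0x0003 // low bits set => rune is cased
	exceptionBit = 1 << 3 // set => value is an exception index instead
	xorShift     = 8      // XOR pattern lives in the upper byte
)

// toggleCase applies the XOR pattern from the upper byte of v to r,
// covering only the simple non-exception, non-index case.
func toggleCase(r rune, v info) rune {
	if v&exceptionBit != 0 {
		return r // exceptions carry an index into a side table; not handled here
	}
	return r ^ rune(v>>xorShift)
}

func main() {
	// Hypothetical trie value for 'a': XOR pattern 0x20, case mode cLower.
	v := info(0x20)<<xorShift | cLower
	fmt.Printf("cased=%v  %q -> %q\n", v&casedMask != 0, 'a', toggleCase('a', v))
}
```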
+const ( + cUncased info = iota // 000 + cTitle // 001 + cLower // 010 + cUpper // 011 + cIgnorableUncased // 100 + cIgnorableCased // 101 // lower case if mappings exist + cXORCase // 11x // case is cLower | ((rune&1) ^ x) + + maxCaseMode = cUpper +) + +func (c info) isCased() bool { + return c&casedMask != 0 +} + +func (c info) isCaseIgnorable() bool { + return c&ignorableMask == ignorableValue +} + +func (c info) isNotCasedAndNotCaseIgnorable() bool { + return c&fullCasedMask == 0 +} + +func (c info) isCaseIgnorableAndNotCased() bool { + return c&fullCasedMask == cIgnorableUncased +} + +func (c info) isMid() bool { + return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased +} + +// The case mapping implementation will need to know about various Canonical +// Combining Class (CCC) values. We encode two of these in the trie value: +// cccZero (0) and cccAbove (230). If the value is cccOther, it means that +// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that +// the rune also has the break category Break (see below). +const ( + cccBreak info = iota << 4 + cccZero + cccAbove + cccOther + + cccMask = cccBreak | cccZero | cccAbove | cccOther +) + +const ( + starter = 0 + above = 230 + iotaSubscript = 240 +) + +// The exceptions slice holds data that does not fit in a normal info entry. +// The entry is pointed to by the exception index in an entry. It has the +// following format: +// +// Header: +// +// byte 0: +// 7..6 unused +// 5..4 CCC type (same bits as entry) +// 3 unused +// 2..0 length of fold +// +// byte 1: +// 7..6 unused +// 5..3 length of 1st mapping of case type +// 2..0 length of 2nd mapping of case type +// +// case 1st 2nd +// lower -> upper, title +// upper -> lower, title +// title -> lower, upper +// +// Lengths with the value 0x7 indicate no value and implies no change. +// A length of 0 indicates a mapping to zero-length string. +// +// Body bytes: +// +// case folding bytes +// lowercase mapping bytes +// uppercase mapping bytes +// titlecase mapping bytes +// closure mapping bytes (for NFKC_Casefold). (TODO) +// +// Fallbacks: +// +// missing fold -> lower +// missing title -> upper +// all missing -> original rune +// +// exceptions starts with a dummy byte to enforce that there is no zero index +// value. +const ( + lengthMask = 0x07 + lengthBits = 3 + noChange = 0 +) + +// References to generated trie. + +var trie = newCaseTrie(0) + +var sparse = sparseBlocks{ + values: sparseValues[:], + offsets: sparseOffsets[:], +} + +// Sparse block lookup code. + +// valueRange is an entry in a sparse block. +type valueRange struct { + value uint16 + lo, hi byte +} + +type sparseBlocks struct { + values []valueRange + offsets []uint16 +} + +// lookup returns the value from values block n for byte b using binary search. +func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { + lo := s.offsets[n] + hi := s.offsets[n+1] + for lo < hi { + m := lo + (hi-lo)/2 + r := s.values[m] + if r.lo <= b && b <= r.hi { + return r.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} + +// lastRuneForTesting is the last rune used for testing. Everything after this +// is boring. +const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go new file mode 100644 index 00000000..3cddbbdd --- /dev/null +++ b/vendor/golang.org/x/text/internal/internal.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains non-exported functionality that are used by +// packages in the text repository. +package internal // import "golang.org/x/text/internal" + +import ( + "sort" + + "golang.org/x/text/language" +) + +// SortTags sorts tags in place. +func SortTags(tags []language.Tag) { + sort.Sort(sorter(tags)) +} + +type sorter []language.Tag + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s sorter) Less(i, j int) bool { + return s[i].String() < s[j].String() +} + +// UniqueTags sorts and filters duplicate tags in place and returns a slice with +// only unique tags. +func UniqueTags(tags []language.Tag) []language.Tag { + if len(tags) <= 1 { + return tags + } + SortTags(tags) + k := 0 + for i := 1; i < len(tags); i++ { + if tags[k].String() < tags[i].String() { + k++ + tags[k] = tags[i] + } + } + return tags[:k+1] +} diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go new file mode 100644 index 00000000..1cc004a6 --- /dev/null +++ b/vendor/golang.org/x/text/internal/match.go @@ -0,0 +1,67 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// This file contains matchers that implement CLDR inheritance. +// +// See https://unicode.org/reports/tr35/#Locale_Inheritance. +// +// Some of the inheritance described in this document is already handled by +// the cldr package. + +import ( + "golang.org/x/text/language" +) + +// TODO: consider if (some of the) matching algorithm needs to be public after +// getting some feel about what is generic and what is specific. + +// NewInheritanceMatcher returns a matcher that matches based on the inheritance +// chain. +// +// The matcher uses canonicalization and the parent relationship to find a +// match. The resulting match will always be either Und or a language with the +// same language and script as the requested language. It will not match +// languages for which there is understood to be mutual or one-directional +// intelligibility. +// +// A Match will indicate an Exact match if the language matches after +// canonicalization and High if the matched tag is a parent. +func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher { + tags := &InheritanceMatcher{make(map[language.Tag]int)} + for i, tag := range t { + ct, err := language.All.Canonicalize(tag) + if err != nil { + ct = tag + } + tags.index[ct] = i + } + return tags +} + +type InheritanceMatcher struct { + index map[language.Tag]int +} + +func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) { + for _, t := range want { + ct, err := language.All.Canonicalize(t) + if err != nil { + ct = t + } + conf := language.Exact + for { + if index, ok := m.index[ct]; ok { + return ct, index, conf + } + if ct == language.Und { + break + } + ct = ct.Parent() + conf = language.High + } + } + return language.Und, 0, language.No +} diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 00000000..47a9a541 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,266 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + return ParseProfilesFromReader(pf) +} + +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + fn, b, err := parseLine(line) + if err != nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err) + } + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, b) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + // Merge samples from the same location. + j := 1 + for i := 1; i < len(p.Blocks); i++ { + b := p.Blocks[i] + last := p.Blocks[j-1] + if b.StartLine == last.StartLine && + b.StartCol == last.StartCol && + b.EndLine == last.EndLine && + b.EndCol == last.EndCol { + if b.NumStmt != last.NumStmt { + return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt) + } + if mode == "set" { + p.Blocks[j-1].Count |= b.Count + } else { + p.Blocks[j-1].Count += b.Count + } + continue + } + p.Blocks[j] = b + j++ + } + p.Blocks = p.Blocks[:j] + } + // Generate a sorted slice. + profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +// parseLine parses a line from a coverage file. 
+// It is equivalent to the regex +// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$ +// +// However, it is much faster: https://golang.org/cl/179377 +func parseLine(l string) (fileName string, block ProfileBlock, err error) { + end := len(l) + + b := ProfileBlock{} + b.Count, end, err = seekBack(l, ' ', end, "Count") + if err != nil { + return "", b, err + } + b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt") + if err != nil { + return "", b, err + } + b.EndCol, end, err = seekBack(l, '.', end, "EndCol") + if err != nil { + return "", b, err + } + b.EndLine, end, err = seekBack(l, ',', end, "EndLine") + if err != nil { + return "", b, err + } + b.StartCol, end, err = seekBack(l, '.', end, "StartCol") + if err != nil { + return "", b, err + } + b.StartLine, end, err = seekBack(l, ':', end, "StartLine") + if err != nil { + return "", b, err + } + fn := l[0:end] + if fn == "" { + return "", b, errors.New("a FileName cannot be blank") + } + return fn, b, nil +} + +// seekBack searches backwards from end to find sep in l, then returns the +// value between sep and end as an integer. +// If seekBack fails, the returned error will reference what. +func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) { + // Since we're seeking backwards and we know only ASCII is legal for these values, + // we can ignore the possibility of non-ASCII characters. + for start := end - 1; start >= 0; start-- { + if l[start] == sep { + i, err := strconv.Atoi(l[start+1 : end]) + if err != nil { + return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err) + } + if i < 0 { + return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i) + } + return i, start, nil + } + } + return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what) +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. + Index int // Order in input file. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + index := 0 + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count, Index: index} + index++ + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? 
+ for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + // Boundaries at the same offset should be ordered according to + // their original position. + return b[i].Index < b[j].Index + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 9fa5aa19..2c4c4e23 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -11,8 +11,6 @@ import ( "go/ast" "go/token" "sort" - - "golang.org/x/tools/internal/typeparams" ) // PathEnclosingInterval returns the node that encloses the source @@ -322,7 +320,7 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, n.Recv) } children = append(children, n.Name) - if tparams := typeparams.ForFuncType(n.Type); tparams != nil { + if tparams := n.Type.TypeParams; tparams != nil { children = append(children, tparams) } if n.Type.Params != nil { @@ -377,7 +375,7 @@ func childrenOf(n ast.Node) []ast.Node { tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: children = append(children, tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) @@ -588,7 +586,7 @@ func NodeDescription(n ast.Node) string { return "decrement statement" case *ast.IndexExpr: return "index expression" - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return "index list expression" case *ast.InterfaceType: return "interface type" diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index f430b21b..58934f76 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -9,8 +9,6 @@ import ( "go/ast" "reflect" "sort" - - "golang.org/x/tools/internal/typeparams" ) // An ApplyFunc is invoked by Apply for each node n, even if n is nil, @@ -252,7 +250,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "X", nil, n.X) a.apply(n, "Index", nil, n.Index) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: a.apply(n, "X", nil, n.X) a.applyList(n, "Indices") @@ -293,7 +291,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "Fields", nil, n.Fields) case *ast.FuncType: - if tparams := typeparams.ForFuncType(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Params", nil, n.Params) @@ -408,7 +406,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. 
case *ast.TypeSpec: a.apply(n, "Doc", nil, n.Doc) a.apply(n, "Name", nil, n.Name) - if tparams := typeparams.ForTypeSpec(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Type", nil, n.Type) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 703c8139..2a872f89 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - "golang.org/x/tools/internal/typeparams" ) const ( @@ -171,7 +169,7 @@ func typeOf(n ast.Node) uint64 { return 1 << nIncDecStmt case *ast.IndexExpr: return 1 << nIndexExpr - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return 1 << nIndexListExpr case *ast.InterfaceType: return 1 << nInterfaceType diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go index 7cf523bc..32c8d142 100644 --- a/vendor/golang.org/x/tools/go/buildutil/tags.go +++ b/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -4,17 +4,22 @@ package buildutil -// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go. +// This duplicated logic must be kept in sync with that from go build: +// $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) +// $GOROOT/src/cmd/go/internal/base/flag.go (StringsFlag.Set) +// $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) -import "fmt" +import ( + "fmt" + "strings" +) const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " + "For more information about build tags, see the description of " + "build constraints in the documentation for the go/build package" // TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses -// a flag value in the same manner as go build's -tags flag and -// populates a []string slice. +// a flag value the same as go build's -tags flag and populates a []string slice. // // See $GOROOT/src/go/build/doc.go for description of build tags. // See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag. @@ -25,19 +30,32 @@ const TagsFlagDoc = "a list of `build tags` to consider satisfied during the bui type TagsFlag []string func (v *TagsFlag) Set(s string) error { - var err error - *v, err = splitQuotedFields(s) - if *v == nil { - *v = []string{} + // See $GOROOT/src/cmd/go/internal/work/build.go (tagsFlag.Set) + // For compatibility with Go 1.12 and earlier, allow "-tags='a b c'" or even just "-tags='a'". + if strings.Contains(s, " ") || strings.Contains(s, "'") { + var err error + *v, err = splitQuotedFields(s) + if *v == nil { + *v = []string{} + } + return err + } + + // Starting in Go 1.13, the -tags flag is a comma-separated list of build tags. + *v = []string{} + for _, s := range strings.Split(s, ",") { + if s != "" { + *v = append(*v, s) + } } - return err + return nil } func (v *TagsFlag) Get() interface{} { return *v } func splitQuotedFields(s string) ([]string, error) { - // Split fields allowing '' or "" around elements. - // Quotes further inside the string do not count. + // See $GOROOT/src/cmd/internal/quoted/quoted.go (Split) + // This must remain in sync with that logic. 
var f []string for len(s) > 0 { for len(s) > 0 && isSpaceByte(s[0]) { @@ -76,5 +94,7 @@ func (v *TagsFlag) String() string { } func isSpaceByte(c byte) bool { + // See $GOROOT/src/cmd/internal/quoted/quoted.go (isSpaceByte, Split) + // This list must remain in sync with that. return c == ' ' || c == '\t' || c == '\n' || c == '\r' } diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 03543bd4..137cc8df 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -47,7 +47,7 @@ import ( func Find(importPath, srcDir string) (filename, path string) { cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) cmd.Dir = srcDir - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { return "", "" } diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go index 38d5c6c7..697974bb 100644 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -59,11 +59,10 @@ import ( "go/token" "log" "os" + "os/exec" "path/filepath" "regexp" "strings" - - exec "golang.org/x/sys/execabs" ) // ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go index 7d94bbc1..2455be54 100644 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go @@ -8,19 +8,22 @@ import ( "errors" "fmt" "go/build" - exec "golang.org/x/sys/execabs" + "os/exec" "strings" ) // pkgConfig runs pkg-config with the specified arguments and returns the flags it prints. func pkgConfig(mode string, pkgs []string) (flags []string, err error) { cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...) - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err) if len(out) > 0 { s = fmt.Sprintf("%s: %s", s, out) } + if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 { + s = fmt.Sprintf("%s\nstderr:\n%s", s, err.Stderr) + } return nil, errors.New(s) } if len(out) > 0 { diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go index edf62c2c..013c0f50 100644 --- a/vendor/golang.org/x/tools/go/loader/loader.go +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -23,7 +23,7 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/internal/cgo" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) var ignoreVendor build.ImportMode @@ -1033,13 +1033,14 @@ func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), }, errorFunc: imp.conf.TypeChecker.Error, dir: dir, } - typeparams.InitInstanceInfo(&info.Info) + versions.InitFileVersions(&info.Info) // Copy the types.Config so we can vary it across PackageInfos. 
tc := imp.conf.TypeChecker diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index a7a8f73e..a8d7b06a 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,20 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. +Other build systems may be supported by providing a "driver"; +see [The driver protocol]. + +All patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -64,9 +72,31 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. + See the Example function for typical usage. + +# The driver protocol + +[Load] may be used to load Go packages even in Go projects that use +alternative build systems, by installing an appropriate "driver" +program for the build system and specifying its location in the +GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. + +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +those patterns identify. Drivers must also support the special "file=" +and "pattern=" patterns described above. + +The patterns are provided as positional command-line arguments. A +JSON-encoded [DriverRequest] message providing additional information +is written to the driver's standard input. The driver must write a +JSON-encoded [DriverResponse] message to its standard output. (This +message differs from the JSON schema produced by 'go list'.) */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7..4335c1eb 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -2,46 +2,85 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file enables an external tool to intercept package requests. -// If the tool is present then its results are used in preference to -// the go list command. 
- package packages +// This file defines the protocol that enables an external "driver" +// tool to supply package metadata in place of 'go list'. + import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) -// The Driver Protocol +// DriverRequest defines the schema of a request for package metadata +// from an external driver program. The JSON-encoded DriverRequest +// message is provided to the driver program's standard input. The +// query patterns are provided as command-line arguments. // -// The driver, given the inputs to a call to Load, returns metadata about the packages specified. -// This allows for different build systems to support go/packages by telling go/packages how the -// packages' source is organized. -// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in -// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package -// documentation in doc.go for the full description of the patterns that need to be supported. -// A driver receives as a JSON-serialized driverRequest struct in standard input and will -// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. - -// driverRequest is used to provide the portion of Load's Config that is needed by a driver. -type driverRequest struct { +// See the package documentation for an overview. +type DriverRequest struct { Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents // of overlay files. Overlay map[string][]byte `json:"overlay"` } +// DriverResponse defines the schema of a response from an external +// driver program, providing the results of a query for package +// metadata. The driver program must write a JSON-encoded +// DriverResponse message to its standard output. +// +// See the package documentation for an overview. +type DriverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the DriverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. 
the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) + // findExternalDriver returns the file path of a tool that supplies // the build system package structure, or "" if not found." // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its @@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*driverResponse, error) { - req, err := json.Marshal(driverRequest{ + return func(cfg *Config, words ...string) (*DriverResponse, error) { + req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, BuildFlags: cfg.BuildFlags, @@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) } - var response driverResponse + var response DriverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index c1292b30..22305d9c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -11,6 +11,7 @@ import ( "fmt" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -20,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -35,23 +35,23 @@ type goTooOldError struct { error } -// responseDeduper wraps a driverResponse, deduplicating its contents. +// responseDeduper wraps a DriverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package - dr *driverResponse + dr *DriverResponse } func newDeduper() *responseDeduper { return &responseDeduper{ - dr: &driverResponse{}, + dr: &DriverResponse{}, seenRoots: map[string]bool{}, seenPackages: map[string]*Package{}, } } -// addAll fills in r with a driverResponse. -func (r *responseDeduper) addAll(dr *driverResponse) { +// addAll fills in r with a DriverResponse. +func (r *responseDeduper) addAll(dr *DriverResponse) { for _, pkg := range dr.Packages { r.addPackage(pkg) } @@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { +func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { } // Fill in response.Sizes asynchronously if necessary. 
- var sizeserr error - var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - sizeswg.Add(1) + errCh := make(chan error) go func() { compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - sizeserr = err response.dr.Compiler = compiler response.dr.Arch = arch - sizeswg.Done() + errCh <- err + }() + defer func() { + if sizesErr := <-errCh; sizesErr != nil { + err = sizesErr + } }() } @@ -208,10 +210,7 @@ extractQueries: } } - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr - } + // (We may yet return an error due to defer.) return response.dr, nil } @@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries // adhocPackage attempts to load or construct an ad-hoc package for a given // query, if the original call to the driver produced inadequate results. -func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { +func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { response, err := state.createDriverResponse(query) if err != nil { return nil, err @@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string { // createDriverResponse uses the "go list" command to expand the pattern // words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { +func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkgs := make(map[string]*Package) additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. - response := &driverResponse{ + response := &DriverResponse{ GoVersion: goVersion, } for dec := json.NewDecoder(buf); dec.More(); { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 6cbd3de8..3ea1b3fa 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -9,6 +9,7 @@ package packages import ( "context" "encoding/json" + "errors" "fmt" "go/ast" "go/parser" @@ -24,11 +25,13 @@ import ( "sync" "time" + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -126,9 +129,8 @@ type Config struct { Mode LoadMode // Context specifies the context for the load operation. - // If the context is cancelled, the loader may stop early - // and return an ErrCancelled error. - // If Context is nil, the load cannot be cancelled. + // Cancelling the context may cause [Load] to abort and + // return an error. Context context.Context // Logf is the logger for the config. @@ -206,50 +208,13 @@ type Config struct { Overlay map[string][]byte } -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*driverResponse, error) - -// driverResponse contains the results for a driver query. 
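The hunk above replaces the sizes WaitGroup with an error channel that is drained in a defer, so a failure from the asynchronous sizes query still reaches goListDriver's named error return even when the function exits early. A stripped-down sketch of that pattern, with illustrative names that are not from the diff:

package main

import "fmt"

// loadWithSizes mirrors the shape of the change: the goroutine's error is
// delivered over a channel and folded into the named return by a defer,
// so it is never lost on an early return.
func loadWithSizes() (result string, err error) {
	errCh := make(chan error)
	go func() {
		// ... asynchronous work (the sizes query in the real code) ...
		errCh <- nil // or the error from that work
	}()
	defer func() {
		if sizesErr := <-errCh; sizesErr != nil {
			err = sizesErr
		}
	}()
	// ... main work, which may return early ...
	return "ok", nil
}

func main() {
	fmt.Println(loadWithSizes())
}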
-type driverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the driverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. - // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - // Load loads and returns the Go packages named by the given patterns. // // Config specifies loading options; // nil behaves the same as an empty Config. // -// Load returns an error if any of the patterns was invalid -// as defined by the underlying build system. +// If any of the patterns was invalid as defined by the +// underlying build system, Load returns an error. // It may return an empty list of packages without an error, // for instance for an empty expansion of a valid wildcard. // Errors associated with a particular package are recorded in the @@ -291,9 +256,28 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. // The boolean result indicates that an external driver handled the request. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { +func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { + const ( + // windowsArgMax specifies the maximum command line length for + // the Windows' CreateProcess function. + windowsArgMax = 32767 + // maxEnvSize is a very rough estimation of the maximum environment + // size of a user. + maxEnvSize = 16384 + // safeArgMax specifies the maximum safe command line length to use + // by the underlying driver excl. the environment. We choose the Windows' + // ARG_MAX as the starting point because it's one of the lowest ARG_MAX + // constants out of the different supported platforms, + // e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results. + safeArgMax = windowsArgMax - maxEnvSize + ) + chunks, err := splitIntoChunks(patterns, safeArgMax) + if err != nil { + return nil, false, err + } + if driver := findExternalDriver(cfg); driver != nil { - response, err := driver(cfg, patterns...) + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } else if !response.NotHandled { @@ -302,10 +286,84 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro // (fall through) } - response, err := goListDriver(cfg, patterns...) 
+ response, err := callDriverOnChunks(goListDriver, cfg, chunks) + if err != nil { + return nil, false, err + } return response, false, err } +// splitIntoChunks chunks the slice so that the total number of characters +// in a chunk is no longer than argMax. +func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { + if argMax <= 0 { + return nil, errors.New("failed to split patterns into chunks, negative safe argMax value") + } + var chunks [][]string + charsInChunk := 0 + nextChunkStart := 0 + for i, v := range patterns { + vChars := len(v) + if vChars > argMax { + // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen + return nil, errors.New("failed to split patterns into chunks, a pattern is too long") + } + charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too + if charsInChunk > argMax { + chunks = append(chunks, patterns[nextChunkStart:i]) + nextChunkStart = i + charsInChunk = vChars + } + } + // add the last chunk + if nextChunkStart < len(patterns) { + chunks = append(chunks, patterns[nextChunkStart:]) + } + return chunks, nil +} + +func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { + if len(chunks) == 0 { + return driver(cfg) + } + responses := make([]*DriverResponse, len(chunks)) + errNotHandled := errors.New("driver returned NotHandled") + var g errgroup.Group + for i, chunk := range chunks { + i := i + chunk := chunk + g.Go(func() (err error) { + responses[i], err = driver(cfg, chunk...) + if responses[i] != nil && responses[i].NotHandled { + err = errNotHandled + } + return err + }) + } + if err := g.Wait(); err != nil { + if errors.Is(err, errNotHandled) { + return &DriverResponse{NotHandled: true}, nil + } + return nil, err + } + return mergeResponses(responses...), nil +} + +func mergeResponses(responses ...*DriverResponse) *DriverResponse { + if len(responses) == 0 { + return nil + } + response := newDeduper() + response.dr.NotHandled = false + response.dr.Compiler = responses[0].Compiler + response.dr.Arch = responses[0].Arch + response.dr.GoVersion = responses[0].GoVersion + for _, v := range responses { + response.addAll(v) + } + return response.dr +} + // A Package describes a loaded Go package. type Package struct { // ID is a unique identifier for a package, @@ -369,6 +427,10 @@ type Package struct { // The NeedTypes LoadMode bit sets this field for packages matching the // patterns; type information for dependencies may be missing or incomplete, // unless NeedDeps and NeedImports are also set. + // + // Each call to [Load] returns a consistent set of type + // symbols, as defined by the comment at [types.Identical]. + // Avoid mixing type information from two or more calls to [Load]. Types *types.Package // Fset provides position information for Types, TypesInfo, and Syntax. 
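splitIntoChunks and callDriverOnChunks above keep each driver invocation's argument list under safeArgMax (32767 − 16384 = 16383 characters) and then merge the per-chunk responses. An illustrative standalone version of the chunking rule, which is not the vendored code itself:

package main

import "fmt"

// chunkPatterns splits patterns so that each chunk, joined with single
// spaces, stays within argMax characters. The real code additionally
// rejects any single pattern longer than argMax; this sketch assumes
// none is.
func chunkPatterns(patterns []string, argMax int) [][]string {
	var chunks [][]string
	start, size := 0, 0
	for i, p := range patterns {
		size += len(p) + 1 // +1 for the separating space
		if size > argMax {
			chunks = append(chunks, patterns[start:i])
			start, size = i, len(p)
		}
	}
	if start < len(patterns) {
		chunks = append(chunks, patterns[start:])
	}
	return chunks
}

func main() {
	fmt.Println(chunkPatterns([]string{"aa", "bb", "cc"}, 6)) // [[aa bb] [cc]]
}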
@@ -432,12 +494,6 @@ func init() { packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { - return config.(*Config).gocmdRunner - } - packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { - config.(*Config).gocmdRunner = runner - } packagesinternal.SetModFile = func(config interface{}, value string) { config.(*Config).modFile = value } @@ -654,7 +710,7 @@ func newLoader(cfg *Config) *loader { // refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *driverResponse) ([]*Package, error) { +func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { roots := response.Roots rootMap := make(map[string]int, len(roots)) for i, root := range roots { @@ -801,6 +857,12 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { wg.Wait() } + // If the context is done, return its error and + // throw out [likely] incomplete packages. + if err := ld.Context.Err(); err != nil { + return nil, err + } + result := make([]*Package, len(initial)) for i, lpkg := range initial { result[i] = lpkg.Package @@ -896,6 +958,14 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) lpkg.Fset = ld.Fset + // Start shutting down if the context is done and do not load + // source or export data files. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. + if ld.Context.Err() != nil { + return + } + // Subtle: we populate all Types fields with an empty Package // before loading export data so that export data processing // never has to create a types.Package for an indirect dependency, @@ -1015,15 +1085,23 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } + // Start shutting down if the context is done and do not type check. + // Packages that import this one will have ld.Context.Err() != nil. + // ld.Context.Err() will be returned later by refine. 
+ if ld.Context.Err() != nil { + return + } + lpkg.TypesInfo = &types.Info{ Types: make(map[ast.Expr]types.TypeAndValue), Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1064,7 +1142,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Sizes: ld.sizes, // may be nil } if lpkg.Module != nil && lpkg.Module.GoVersion != "" { - typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) + tc.GoVersion = "go" + lpkg.Module.GoVersion } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { @@ -1075,10 +1153,24 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } } - types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed + // In go/types go1.21 and go1.22, Checker.Files failed fast with a + // a "too new" error, without calling tc.Error and without + // proceeding to type-check the package (#66525). + // We rely on the runtimeVersion error to give the suggested remedy. + if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 { + if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") { + appendError(types.Error{ + Fset: ld.Fset, + Pos: lpkg.Syntax[0].Package, + Msg: msg, + }) + } + } + // If !Cgo, the type-checker uses FakeImportC mode, so // it doesn't invoke the importer for import "C", // nor report an error for the import, @@ -1100,6 +1192,12 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // If types.Checker.Files had an error that was unreported, + // make sure to report the unknown error so the package is illTyped. + if typErr != nil && len(lpkg.Errors) == 0 { + appendError(typErr) + } + // Record accumulated errors. illTyped := len(lpkg.Errors) > 0 if !illTyped { @@ -1171,11 +1269,6 @@ func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { parsed := make([]*ast.File, n) errors := make([]error, n) for i, file := range filenames { - if ld.Config.Context.Err() != nil { - parsed[i] = nil - errors[i] = ld.Config.Context.Err() - continue - } wg.Add(1) go func(i int, filename string) { parsed[i], errors[i] = ld.parseFile(filename) diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index e742ecc4..a2386c34 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -29,9 +29,12 @@ import ( "strconv" "strings" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) +// TODO(adonovan): think about generic aliases. + // A Path is an opaque name that identifies a types.Object // relative to its package. Conceptually, the name consists of a // sequence of destructuring operations applied to the package scope @@ -223,7 +226,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. 
switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -283,7 +286,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { // generic named type return Path(r), nil } @@ -310,7 +313,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := o.Type().(*types.Named); ok { + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -391,17 +394,12 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // of objectpath will only be giving us origin methods, anyway, as referring // to instantiated methods is usually not useful. - if typeparams.OriginMethod(meth) != meth { + if meth.Origin() != meth { return "", false } - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { return "", false } @@ -444,6 +442,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
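The objectpath changes above swap the internal typeparams shims for the native go/types API and unwrap aliases before type assertions; the exported surface of the package is unchanged. For orientation, a hedged usage sketch of that surface — Encoder.For to encode a path, Object to resolve it again:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

// roundTrip encodes an object's path relative to its package and resolves
// it back. Objects that are not reachable from the package scope have no
// path, and For returns an error for them.
func roundTrip(obj types.Object) error {
	var enc objectpath.Encoder
	p, err := enc.For(obj)
	if err != nil {
		return err
	}
	got, err := objectpath.Object(obj.Pkg(), p)
	if err != nil {
		return err
	}
	fmt.Printf("%v -> %q -> %v\n", obj, p, got)
	return nil
}

func main() {} // obj would come from a type-checked *types.Package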
@@ -462,7 +462,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -505,7 +505,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } } return nil - case *typeparams.TypeParam: + case *types.TypeParam: name := T.Obj() if name == obj { return append(path, opObj) @@ -525,7 +525,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, opTypeParam, i) @@ -562,7 +562,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } // abstraction of *types.{Named,Signature} type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList + TypeParams() *types.TypeParamList } // abstraction of *types.{Named,TypeParam} type hasObj interface { @@ -616,6 +616,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil + t = aliases.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -664,7 +665,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = tparams.At(index) case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) + tparam, ok := t.(*types.TypeParam) if !ok { return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go new file mode 100644 index 00000000..c24c2eee --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliases + +import ( + "go/token" + "go/types" +) + +// Package aliases defines backward compatible shims +// for the types.Alias type representation added in 1.22. +// This defines placeholders for x/tools until 1.26. + +// NewAlias creates a new TypeName in Package pkg that +// is an alias for the type rhs. +// +// The enabled parameter determines whether the resulting [TypeName]'s +// type is an [types.Alias]. Its value must be the result of a call to +// [Enabled], which computes the effective value of +// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled +// function is expensive and should be called once per task (e.g. +// package import), not once per call to NewAlias. 
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { + if enabled { + tname := types.NewTypeName(pos, pkg, name, nil) + newAlias(tname, rhs) + return tname + } + return types.NewTypeName(pos, pkg, name, rhs) +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go new file mode 100644 index 00000000..c027b9f3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -0,0 +1,31 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package aliases + +import ( + "go/types" +) + +// Alias is a placeholder for a go/types.Alias for <=1.21. +// It will never be created by go/types. +type Alias struct{} + +func (*Alias) String() string { panic("unreachable") } +func (*Alias) Underlying() types.Type { panic("unreachable") } +func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func Rhs(alias *Alias) types.Type { panic("unreachable") } + +// Unalias returns the type t for go <=1.21. +func Unalias(t types.Type) types.Type { return t } + +func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } + +// Enabled reports whether [NewAlias] should create [types.Alias] types. +// +// Before go1.22, this function always returns false. +func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go new file mode 100644 index 00000000..b3299548 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -0,0 +1,63 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package aliases + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" +) + +// Alias is an alias of types.Alias. +type Alias = types.Alias + +// Rhs returns the type on the right-hand side of the alias declaration. +func Rhs(alias *Alias) types.Type { + if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { + return alias.Rhs() // go1.23+ + } + + // go1.22's Alias didn't have the Rhs method, + // so Unalias is the best we can do. + return Unalias(alias) +} + +// Unalias is a wrapper of types.Unalias. +func Unalias(t types.Type) types.Type { return types.Unalias(t) } + +// newAlias is an internal alias around types.NewAlias. +// Direct usage is discouraged as the moment. +// Try to use NewAlias instead. +func newAlias(tname *types.TypeName, rhs types.Type) *Alias { + a := types.NewAlias(tname, rhs) + // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. + Unalias(a) + return a +} + +// Enabled reports whether [NewAlias] should create [types.Alias] types. +// +// This function is expensive! Call it sparingly. +func Enabled() bool { + // The only reliable way to compute the answer is to invoke go/types. + // We don't parse the GODEBUG environment variable, because + // (a) it's tricky to do so in a manner that is consistent + // with the godebug package; in particular, a simple + // substring check is not good enough. The value is a + // rightmost-wins list of options. 
But more importantly: + // (b) it is impossible to detect changes to the effective + // setting caused by os.Setenv("GODEBUG"), as happens in + // many tests. Therefore any attempt to cache the result + // is just incorrect. + fset := token.NewFileSet() + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) + _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) + return enabled +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 00000000..c0e8e731 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. +func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go deleted file mode 100644 index 581b26c2..00000000 --- a/vendor/golang.org/x/tools/internal/event/tag/tag.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tag provides the labels used for telemetry throughout gopls. -package tag - -import ( - "golang.org/x/tools/internal/event/keys" -) - -var ( - // create the label keys we use - Method = keys.NewString("method", "") - StatusCode = keys.NewString("status.code", "") - StatusMessage = keys.NewString("status.message", "") - RPCID = keys.NewString("id", "") - RPCDirection = keys.NewString("direction", "") - File = keys.NewString("file", "") - Directory = keys.New("directory", "") - URI = keys.New("URI", "") - Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs - PackagePath = keys.NewString("package_path", "") - Query = keys.New("query", "") - Snapshot = keys.NewUInt64("snapshot", "") - Operation = keys.NewString("operation", "") - - Position = keys.New("position", "") - Category = keys.NewString("category", "") - PackageCount = keys.NewInt("packages", "") - Files = keys.New("files", "") - Port = keys.NewInt("port", "") - Type = keys.New("type", "") - HoverKind = keys.NewString("hoverkind", "") - - NewServer = keys.NewString("new_server", "A new server was added") - EndServer = keys.NewString("end_server", "A server was shut down") - - ServerID = keys.NewString("server", "The server ID an event is related to") - Logfile = keys.NewString("logfile", "") - DebugAddress = keys.NewString("debug_address", "") - GoplsPath = keys.NewString("gopls_path", "") - ClientID = keys.NewString("client_id", "") - - Level = keys.NewInt("level", "The logging level") -) - -var ( - // create the stats we measure - Started = keys.NewInt64("started", "Count of started RPCs.") - ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) - SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) - Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) -) - -const ( - 
Inbound = "in" - Outbound = "out" -) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 2d078ccb..39df9112 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -259,13 +259,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func return } -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - type byPath []*types.Package func (a byPath) Len() int { return len(a) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 6103dd71..deeb67f3 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -23,8 +23,8 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/tokeninternal" - "golang.org/x/tools/internal/typeparams" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -464,7 +464,7 @@ func (p *iexporter) doDecl(obj types.Object) { switch obj := obj.(type) { case *types.Var: - w.tag('V') + w.tag(varTag) w.pos(obj.Pos()) w.typ(obj.Type(), obj.Pkg()) @@ -481,10 +481,10 @@ func (p *iexporter) doDecl(obj types.Object) { } // Function. - if typeparams.ForSignature(sig).Len() == 0 { - w.tag('F') + if sig.TypeParams().Len() == 0 { + w.tag(funcTag) } else { - w.tag('G') + w.tag(genericFuncTag) } w.pos(obj.Pos()) // The tparam list of the function type is the declaration of the type @@ -494,27 +494,27 @@ func (p *iexporter) doDecl(obj types.Object) { // // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + if tparams := sig.TypeParams(); tparams.Len() > 0 { w.tparamList(obj.Name(), tparams, obj.Pkg()) } w.signature(sig) case *types.Const: - w.tag('C') + w.tag(constTag) w.pos(obj.Pos()) w.value(obj.Type(), obj.Val()) case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*typeparams.TypeParam); ok { - w.tag('P') + if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) + if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + implicit = iface.IsImplicit() } w.bool(implicit) } @@ -523,8 +523,13 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag('A') + w.tag(aliasTag) w.pos(obj.Pos()) + if alias, ok := t.(*aliases.Alias); ok { + // Preserve materialized aliases, + // even of non-exported types. 
+ t = aliases.Rhs(alias) + } w.typ(t, obj.Pkg()) break } @@ -535,20 +540,20 @@ func (p *iexporter) doDecl(obj types.Object) { panic(internalErrorf("%s is not a defined type", t)) } - if typeparams.ForNamed(named).Len() == 0 { - w.tag('T') + if named.TypeParams().Len() == 0 { + w.tag(typeTag) } else { - w.tag('U') + w.tag(genericTypeTag) } w.pos(obj.Pos()) - if typeparams.ForNamed(named).Len() > 0 { + if named.TypeParams().Len() > 0 { // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } - underlying := obj.Type().Underlying() + underlying := named.Underlying() w.typ(underlying, obj.Pkg()) if types.IsInterface(t) { @@ -565,7 +570,7 @@ func (p *iexporter) doDecl(obj types.Object) { // Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() for i := 0; i < rparams.Len(); i++ { rparam := rparams.At(i) @@ -739,20 +744,25 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { + case *aliases.Alias: + // TODO(adonovan): support parameterized aliases, following *types.Named. + w.startType(aliasType) + w.qualifiedType(t.Obj()) + case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) // TODO(rfindley): investigate if this position is correct, and if it // matters. w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(definedType) w.qualifiedType(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: w.startType(typeParamType) w.qualifiedType(t.Obj()) @@ -844,7 +854,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := ft.(*types.Named); named != nil { + if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) @@ -868,7 +878,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.signature(sig) } - case *typeparams.Union: + case *types.Union: w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) @@ -948,14 +958,14 @@ func (w *exportWriter) signature(sig *types.Signature) { } } -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) for i := 0; i < ts.Len(); i++ { w.typ(ts.At(i), pkg) } } -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) for i := 0; i < list.Len(); i++ { @@ -973,7 +983,7 @@ const blankMarker = "$" // differs from its actual object name: it is prefixed with a qualifier, and // blank type parameter names are disambiguated by their index in the type // parameter list. 
-func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { +func tparamExportName(prefix string, tparam *types.TypeParam) string { assert(prefix != "") name := tparam.Obj().Name() if name == "_" { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 8e64cf64..136aa036 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -22,7 +22,8 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) type intReader struct { @@ -79,6 +80,20 @@ const ( typeParamType instanceType unionType + aliasType +) + +// Object tags +const ( + varTag = 'V' + funcTag = 'F' + genericFuncTag = 'G' + constTag = 'C' + aliasTag = 'A' + genericAliasTag = 'B' + typeParamTag = 'P' + typeTag = 'T' + genericTypeTag = 'U' ) // IImportData imports a package from the serialized package data @@ -195,6 +210,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte p := iimporter{ version: int(version), ipath: path, + aliases: aliases.Enabled(), shallow: shallow, reportf: reportf, @@ -225,6 +241,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte // Gather the relevant packages from the manifest. items := make([]GetPackagesItem, r.uint64()) + uniquePkgPaths := make(map[string]bool) for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) @@ -249,6 +266,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } items[i].nameIndex = nameIndex + + uniquePkgPaths[pkgPath] = true + } + // Debugging #63822; hypothesis: there are duplicate PkgPaths. + if len(uniquePkgPaths) != len(items) { + reportf("found duplicate PkgPaths while reading export data manifest: %v", items) } // Request packages all at once from the client, @@ -316,12 +339,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } // SetConstraint can't be called if the constraint type is not yet complete. - // When type params are created in the 'P' case of (*importReader).obj(), + // When type params are created in the typeParamTag case of (*importReader).obj(), // the associated constraint type may not be complete due to recursion. // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. 
for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) + d.t.SetConstraint(d.constraint) } for _, typ := range p.interfaceList { @@ -339,7 +362,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } type setConstraintArgs struct { - t *typeparams.TypeParam + t *types.TypeParam constraint types.Type } @@ -347,6 +370,7 @@ type iimporter struct { version int ipath string + aliases bool shallow bool reportf ReportFunc // if non-nil, used to report bugs @@ -516,7 +540,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := rhs.(*types.Interface) + iface, _ := aliases.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -538,25 +562,29 @@ func (r *importReader) obj(name string) { pos := r.pos() switch tag { - case 'A': + case aliasTag: typ := r.typ() - - r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) - - case 'C': + // TODO(adonovan): support generic aliases: + // if tag == genericAliasTag { + // tparams := r.tparamList() + // alias.SetTypeParams(tparams) + // } + r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + + case constTag: typ, val := r.value() r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) - case 'F', 'G': - var tparams []*typeparams.TypeParam - if tag == 'G' { + case funcTag, genericFuncTag: + var tparams []*types.TypeParam + if tag == genericFuncTag { tparams = r.tparamList() } sig := r.signature(nil, nil, tparams) r.declare(types.NewFunc(pos, r.currPkg, name, sig)) - case 'T', 'U': + case typeTag, genericTypeTag: // Types can be recursive. We need to setup a stub // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) @@ -564,9 +592,9 @@ func (r *importReader) obj(name string) { // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) - if tag == 'U' { + if tag == genericTypeTag { tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) + named.SetTypeParams(tparams) } underlying := r.p.typAt(r.uint64(), named).Underlying() @@ -581,14 +609,13 @@ func (r *importReader) obj(name string) { // If the receiver has any targs, set those as the // rparams of the method (since those are the // typeparams being used in the method sig/body). - base := baseType(recv.Type()) - assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam + _, recvNamed := typesinternal.ReceiverNamed(recv) + targs := recvNamed.TypeArgs() + var rparams []*types.TypeParam if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) + rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) + rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -597,7 +624,7 @@ func (r *importReader) obj(name string) { } } - case 'P': + case typeParamTag: // We need to "declare" a typeparam in order to have a name that // can be referenced recursively (if needed) in the type param's // bound. @@ -606,7 +633,7 @@ func (r *importReader) obj(name string) { } name0 := tparamName(name) tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) + t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. 
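A recurring pattern in these gcimporter hunks is unwrapping before asserting: when GODEBUG=gotypesalias=1 is in effect, an alias is materialized as a distinct *types.Alias node, so a direct assertion to *types.Named, *types.Interface, or *types.TypeParam would fail on aliased types. A small sketch of the idiom, using the standard types.Unalias available on go1.22+ (the vendored aliases.Unalias shim is the version-portable equivalent used in the code above):

package main

import (
	"fmt"
	"go/types"
)

// isNamed reports whether t is (an alias of) a defined type. Inside the
// vendored code the aliases.Unalias shim is used instead of types.Unalias
// so that the package also builds on Go versions before 1.22.
func isNamed(t types.Type) bool {
	_, ok := types.Unalias(t).(*types.Named)
	return ok
}

func main() {
	fmt.Println(isNamed(types.Typ[types.Int])) // false: *types.Basic, not *types.Named
}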
@@ -618,11 +645,11 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := constraint.(*types.Interface) + iface, _ := aliases.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } - typeparams.MarkImplicit(iface) + iface.MarkImplicit() } // The constraint type may not be complete, if we // are in the middle of a type recursion involving type @@ -630,7 +657,7 @@ func (r *importReader) obj(name string) { // completely set up all types in ImportData. r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) - case 'V': + case varTag: typ := r.typ() r.declare(types.NewVar(pos, r.currPkg, name, typ)) @@ -825,7 +852,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := t.(*types.Interface) + _, ok := aliases.Unalias(t).(*types.Interface) return ok } @@ -847,7 +874,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { errorf("unexpected kind tag in %q: %v", r.p.ipath, k) return nil - case definedType: + case aliasType, definedType: pkg, name := r.qualifiedIdent() r.p.doDecl(pkg, name) return pkg.Scope().Lookup(name).(*types.TypeName).Type() @@ -966,7 +993,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // The imported instantiated type doesn't include any methods, so // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) + t, _ := types.Instantiate(nil, baseType, targs, false) // Workaround for golang/go#61561. See the doc for instanceList for details. r.p.instanceList = append(r.p.instanceList, t) @@ -976,11 +1003,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if r.p.version < iexportVersionGenerics { errorf("unexpected instantiation type") } - terms := make([]*typeparams.Term, r.uint64()) + terms := make([]*types.Term, r.uint64()) for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + terms[i] = types.NewTerm(r.bool(), r.typ()) } - return typeparams.NewUnion(terms) + return types.NewUnion(terms) } } @@ -1008,23 +1035,23 @@ func (r *importReader) objectPathObject() types.Object { return obj } -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } -func (r *importReader) tparamList() []*typeparams.TypeParam { +func (r *importReader) tparamList() []*types.TypeParam { n := r.uint64() if n == 0 { return nil } - xs := make([]*typeparams.TypeParam, n) + xs := make([]*types.TypeParam, n) for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. 
- xs[i] = r.typ().(*typeparams.TypeParam) + xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1071,13 +1098,3 @@ func (r *importReader) byte() byte { } return x } - -func baseType(typ types.Type) *types.Named { - // pointer receivers are never types.Named types - if p, _ := typ.(*types.Pointer); p != nil { - typ = p.Elem() - } - // receiver base types are always (possibly generic) types.Named types - n, _ := typ.(*types.Named) - return n -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go deleted file mode 100644 index d892273e..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGo1_11 - -func additionalPredeclared() []types.Type { - return nil -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go index edbe6ea7..0cd3b91b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 -// +build go1.18 - package gcimporter import "go/types" diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go index 286bf445..38b624ca 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !(go1.18 && goexperiment.unified) -// +build !go1.18 !goexperiment.unified +//go:build !goexperiment.unified +// +build !goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go index b5d69ffb..b5118d0b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 && goexperiment.unified -// +build go1.18,goexperiment.unified +//go:build goexperiment.unified +// +build goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go deleted file mode 100644 index 8eb20729..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" -) - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") - return -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b977435f..2c077068 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -4,9 +4,6 @@ // Derived from go/internal/gcimporter/ureader.go -//go:build go1.18 -// +build go1.18 - package gcimporter import ( @@ -16,6 +13,7 @@ import ( "sort" "strings" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" ) @@ -28,6 +26,7 @@ type pkgReader struct { ctxt *types.Context imports map[string]*types.Package // previously imported packages, indexed by path + aliases bool // create types.Alias nodes // lazily initialized arrays corresponding to the unified IR // PosBase, Pkg, and Type sections, respectively. @@ -101,6 +100,7 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st ctxt: ctxt, imports: imports, + aliases: aliases.Enabled(), posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), @@ -526,7 +526,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() typ := r.typ() - declare(types.NewTypeName(pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +553,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). - if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index c27b91f8..eb7a8282 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -13,6 +13,7 @@ import ( "io" "log" "os" + "os/exec" "reflect" "regexp" "runtime" @@ -21,12 +22,9 @@ import ( "sync" "time" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/event/keys" "golang.org/x/tools/internal/event/label" - "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -56,11 +54,14 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) -// verb is an event label for the go command verb. 
-var verb = keys.NewString("verb", "go command verb") +// event keys for go command invocations +var ( + verb = keys.NewString("verb", "go command verb") + directory = keys.NewString("directory", "") +) func invLabels(inv Invocation) []label.Label { - return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} + return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)} } // Run is a convenience wrapper around RunRaw. @@ -159,12 +160,15 @@ type Invocation struct { BuildFlags []string // If ModFlag is set, the go command is invoked with -mod=ModFlag. + // TODO(rfindley): remove, in favor of Args. ModFlag string // If ModFile is set, the go command is invoked with -modfile=ModFile. + // TODO(rfindley): remove, in favor of Args. ModFile string // If Overlay is set, the go command is invoked with -overlay=Overlay. + // TODO(rfindley): remove, in favor of Args. Overlay string // If CleanEnv is set, the invocation will run only with the environment diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go index 2d3d408c..e38d1fb4 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/vendor.go +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -107,3 +107,57 @@ func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*Modul } return mod, lines[4] == "go1.14", nil } + +// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) { + inv.Verb = "env" + inv.Args = []string{"GOWORK"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goWork := string(bytes.TrimSpace(stdout.Bytes())) + if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() { + mainMods, err := getWorkspaceMainModules(ctx, inv, r) + if err != nil { + return false, nil, err + } + return true, mainMods, nil + } + return false, nil, nil +} + +// getWorkspaceMainModules gets the main modules' information. +// This is the information needed to figure out if vendoring should be enabled. 
+func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, err + } + + lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n") + if len(lines) < 4 { + return nil, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mods := make([]*ModuleJSON, 0, len(lines)/4) + for i := 0; i < len(lines); i += 4 { + mods = append(mods, &ModuleJSON{ + Path: lines[i], + Dir: lines[i+1], + GoMod: lines[i+2], + GoVersion: lines[i+3], + Main: true, + }) + } + return mods, nil +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index f79dd8cc..83615155 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -9,11 +9,13 @@ package gopathwalk import ( "bufio" "bytes" + "io" "io/fs" - "log" "os" "path/filepath" + "runtime" "strings" + "sync" "time" ) @@ -21,8 +23,13 @@ import ( type Options struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool + + // Maximum number of concurrent calls to user-provided callbacks, + // or 0 for GOMAXPROCS. + Concurrency int } // RootType indicates the type of a Root. @@ -43,22 +50,28 @@ type Root struct { Type RootType } -// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. -// For each package found, add will be called (concurrently) with the absolute +// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. -// add will be called concurrently. +// +// Unlike filepath.WalkDir, Walk follows symbolic links +// (while guarding against cycles). func Walk(roots []Root, add func(root Root, dir string), opts Options) { WalkSkip(roots, add, func(Root, string) bool { return false }, opts) } -// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. -// For each package found, add will be called (concurrently) with the absolute +// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to +// find packages. +// +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. -// For each directory that will be scanned, skip will be called (concurrently) +// For each directory that will be scanned, skip will be called // with the absolute paths of the containing source directory and the directory. // If skip returns false on a directory it will be processed. -// add will be called concurrently. -// skip will be called concurrently. +// +// Unlike filepath.WalkDir, WalkSkip follows symbolic links +// (while guarding against cycles). func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { for _, root := range roots { walkDir(root, add, skip, opts) @@ -67,45 +80,51 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. 
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if opts.Logf == nil { + opts.Logf = func(format string, args ...interface{}) {} + } if _, err := os.Stat(root.Path); os.IsNotExist(err) { - if opts.Logf != nil { - opts.Logf("skipping nonexistent directory: %v", root.Path) - } + opts.Logf("skipping nonexistent directory: %v", root.Path) return } start := time.Now() - if opts.Logf != nil { - opts.Logf("scanning %s", root.Path) + opts.Logf("scanning %s", root.Path) + + concurrency := opts.Concurrency + if concurrency == 0 { + // The walk be either CPU-bound or I/O-bound, depending on what the + // caller-supplied add function does and the details of the user's platform + // and machine. Rather than trying to fine-tune the concurrency level for a + // specific environment, we default to GOMAXPROCS: it is likely to be a good + // choice for a CPU-bound add function, and if it is instead I/O-bound, then + // dealing with I/O saturation is arguably the job of the kernel and/or + // runtime. (Oversaturating I/O seems unlikely to harm performance as badly + // as failing to saturate would.) + concurrency = runtime.GOMAXPROCS(0) } - w := &walker{ - root: root, - add: add, - skip: skip, - opts: opts, - added: make(map[string]bool), + root: root, + add: add, + skip: skip, + opts: opts, + sem: make(chan struct{}, concurrency), } w.init() - // Add a trailing path separator to cause filepath.WalkDir to traverse symlinks. + w.sem <- struct{}{} path := root.Path - if len(path) == 0 { - path = "." + string(filepath.Separator) - } else if !os.IsPathSeparator(path[len(path)-1]) { - path = path + string(filepath.Separator) + if path == "" { + path = "." } - - if err := filepath.WalkDir(path, w.walk); err != nil { - logf := opts.Logf - if logf == nil { - logf = log.Printf - } - logf("scanning directory %v: %v", root.Path, err) + if fi, err := os.Lstat(path); err == nil { + w.walk(path, nil, fs.FileInfoToDirEntry(fi)) + } else { + w.opts.Logf("scanning directory %v: %v", root.Path, err) } + <-w.sem + w.walking.Wait() - if opts.Logf != nil { - opts.Logf("scanned %s in %v", root.Path, time.Since(start)) - } + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) } // walker is the callback for fastwalk.Walk. @@ -115,9 +134,18 @@ type walker struct { skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. opts Options // Options passed to Walk by the user. + walking sync.WaitGroup + sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release. ignoredDirs []string - added map[string]bool + added sync.Map // map[string]bool +} + +// A symlinkList is a linked list of os.FileInfos for parent directories +// reached via symlinks. 
+type symlinkList struct { + info os.FileInfo + prev *symlinkList } // init initializes the walker based on its Options @@ -134,9 +162,7 @@ func (w *walker) init() { for _, p := range ignoredPaths { full := filepath.Join(w.root.Path, p) w.ignoredDirs = append(w.ignoredDirs, full) - if w.opts.Logf != nil { - w.opts.Logf("Directory added to ignore list: %s", full) - } + w.opts.Logf("Directory added to ignore list: %s", full) } } @@ -146,12 +172,10 @@ func (w *walker) init() { func (w *walker) getIgnoredDirs(path string) []string { file := filepath.Join(path, ".goimportsignore") slurp, err := os.ReadFile(file) - if w.opts.Logf != nil { - if err != nil { - w.opts.Logf("%v", err) - } else { - w.opts.Logf("Read %s", file) - } + if err != nil { + w.opts.Logf("%v", err) + } else { + w.opts.Logf("Read %s", file) } if err != nil { return nil @@ -184,138 +208,130 @@ func (w *walker) shouldSkipDir(dir string) bool { } // walk walks through the given path. -func (w *walker) walk(path string, d fs.DirEntry, err error) error { - typ := d.Type() - if typ.IsRegular() { +// +// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored. +func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) { + if d.Type()&os.ModeSymlink != 0 { + // Walk the symlink's target rather than the symlink itself. + // + // (Note that os.Stat, unlike the lower-lever os.Readlink, + // follows arbitrarily many layers of symlinks, so it will eventually + // reach either a non-symlink or a nonexistent target.) + // + // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src + // and GOPATH/src. Do we really need to traverse them here? If so, why? + + fi, err := os.Stat(path) + if err != nil { + w.opts.Logf("%v", err) + return + } + + // Avoid walking symlink cycles: if we have already followed a symlink to + // this directory as a parent of itself, don't follow it again. + // + // This doesn't catch the first time through a cycle, but it also minimizes + // the number of extra stat calls we make if we *don't* encounter a cycle. + // Since we don't actually expect to encounter symlink cycles in practice, + // this seems like the right tradeoff. + for parent := pathSymlinks; parent != nil; parent = parent.prev { + if os.SameFile(fi, parent.info) { + return + } + } + + pathSymlinks = &symlinkList{ + info: fi, + prev: pathSymlinks, + } + d = fs.FileInfoToDirEntry(fi) + } + + if d.Type().IsRegular() { if !strings.HasSuffix(path, ".go") { - return nil + return } dir := filepath.Dir(path) if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. - return nil + // + // TODO(bcmills): there are many levels of directory within + // RootModuleCache where this also wouldn't make sense, + // Can we generalize this to any directory without a corresponding + // import path? + return } - if !w.added[dir] { + if _, dup := w.added.LoadOrStore(dir, true); !dup { w.add(w.root, dir) - w.added[dir] = true } - return nil } - if typ == os.ModeDir { - base := filepath.Base(path) - if base == "" || base[0] == '.' 
|| base[0] == '_' || - base == "testdata" || - (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || - (!w.opts.ModulesEnabled && base == "node_modules") { - return filepath.SkipDir - } - if w.shouldSkipDir(path) { - return filepath.SkipDir - } - return nil - } - if typ == os.ModeSymlink && err == nil { - // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src - // and GOPATH/src. Do we really need to traverse them here? If so, why? - - if os.IsPathSeparator(path[len(path)-1]) { - // The OS was supposed to resolve a directory symlink but didn't. - // - // On macOS this may be caused by a known libc/kernel bug; - // see https://go.dev/issue/59586. - // - // On Windows before Go 1.21, this may be caused by a bug in - // os.Lstat (fixed in https://go.dev/cl/463177). - // - // In either case, we can work around the bug by walking this level - // explicitly: first the symlink target itself, then its contents. - - fi, err := os.Stat(path) - if err != nil || !fi.IsDir() { - return nil - } - err = w.walk(path, fs.FileInfoToDirEntry(fi), nil) - if err == filepath.SkipDir { - return nil - } else if err != nil { - return err - } - - ents, _ := os.ReadDir(path) // ignore error if unreadable - for _, d := range ents { - nextPath := filepath.Join(path, d.Name()) - var err error - if d.IsDir() { - err = filepath.WalkDir(nextPath, w.walk) - } else { - err = w.walk(nextPath, d, nil) - if err == filepath.SkipDir { - break - } - } - if err != nil { - return err - } - } - return nil - } - base := filepath.Base(path) - if strings.HasPrefix(base, ".#") { - // Emacs noise. - return nil - } - if w.shouldTraverse(path) { - // Add a trailing separator to traverse the symlink. - nextPath := path + string(filepath.Separator) - return filepath.WalkDir(nextPath, w.walk) - } + if !d.IsDir() { + return } - return nil -} -// shouldTraverse reports whether the symlink fi, found in dir, -// should be followed. It makes sure symlinks were never visited -// before to avoid symlink loops. -func (w *walker) shouldTraverse(path string) bool { - if w.shouldSkipDir(path) { - return false + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") || + w.shouldSkipDir(path) { + return } - ts, err := os.Stat(path) + // Read the directory and walk its entries. + + f, err := os.Open(path) if err != nil { - logf := w.opts.Logf - if logf == nil { - logf = log.Printf - } - logf("%v", err) - return false - } - if !ts.IsDir() { - return false + w.opts.Logf("%v", err) + return } + defer f.Close() - // Check for symlink loops by statting each directory component - // and seeing if any are the same file as ts. for { - parent := filepath.Dir(path) - if parent == path { - // Made it to the root without seeing a cycle. - // Use this symlink. - return true - } - parentInfo, err := os.Stat(parent) + // We impose an arbitrary limit on the number of ReadDir results per + // directory to limit the amount of memory consumed for stale or upcoming + // directory entries. The limit trades off CPU (number of syscalls to read + // the whole directory) against RAM (reachable directory entries other than + // the one currently being processed). + // + // Since we process the directories recursively, we will end up maintaining + // a slice of entries for each level of the directory tree. + // (Compare https://go.dev/issue/36197.) 
+ ents, err := f.ReadDir(1024) if err != nil { - return false + if err != io.EOF { + w.opts.Logf("%v", err) + } + break } - if os.SameFile(ts, parentInfo) { - // Cycle. Don't traverse. - return false + + for _, d := range ents { + nextPath := filepath.Join(path, d.Name()) + if d.IsDir() { + select { + case w.sem <- struct{}{}: + // Got a new semaphore token, so we can traverse the directory concurrently. + d := d + w.walking.Add(1) + go func() { + defer func() { + <-w.sem + w.walking.Done() + }() + w.walk(nextPath, pathSymlinks, d) + }() + continue + + default: + // No tokens available, so traverse serially. + } + } + + w.walk(nextPath, pathSymlinks, d) } - path = parent } - } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 01e8ba5f..93d49a6e 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -13,6 +13,7 @@ import ( "go/build" "go/parser" "go/token" + "go/types" "io/fs" "io/ioutil" "os" @@ -30,6 +31,7 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" ) // importToGroup is a list of functions which map from an import path to @@ -254,7 +256,7 @@ type pass struct { otherFiles []*ast.File // sibling files. // Intermediate state, generated by load. - existingImports map[string]*ImportInfo + existingImports map[string][]*ImportInfo allRefs references missingRefs references @@ -299,6 +301,20 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { return nil } +// if there is a trailing major version, remove it +func withoutVersion(nm string) string { + if v := path.Base(nm); len(v) > 0 && v[0] == 'v' { + if _, err := strconv.Atoi(v[1:]); err == nil { + // this is, for instance, called with rand/v2 and returns rand + if len(v) < len(nm) { + xnm := nm[:len(nm)-len(v)-1] + return path.Base(xnm) + } + } + } + return nm +} + // importIdentifier returns the identifier that imp will introduce. It will // guess if the package name has not been loaded, e.g. because the source // is not available. @@ -308,7 +324,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { } known := p.knownPackages[imp.ImportPath] if known != nil && known.name != "" { - return known.name + return withoutVersion(known.name) } return ImportPathToAssumedName(imp.ImportPath) } @@ -319,7 +335,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { func (p *pass) load() ([]*ImportFix, bool) { p.knownPackages = map[string]*packageInfo{} p.missingRefs = references{} - p.existingImports = map[string]*ImportInfo{} + p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. p.allRefs = collectReferences(p.f) @@ -350,7 +366,7 @@ func (p *pass) load() ([]*ImportFix, bool) { } } for _, imp := range imports { - p.existingImports[p.importIdentifier(imp)] = imp + p.existingImports[p.importIdentifier(imp)] = append(p.existingImports[p.importIdentifier(imp)], imp) } // Find missing references. @@ -389,31 +405,33 @@ func (p *pass) fix() ([]*ImportFix, bool) { // Found everything, or giving up. Add the new imports and remove any unused. var fixes []*ImportFix - for _, imp := range p.existingImports { - // We deliberately ignore globals here, because we can't be sure - // they're in the same package. 
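The gopathwalk rewrite above replaces filepath.WalkDir with a hand-rolled recursion that bounds concurrency using a channel semaphore (send to acquire, receive to release) plus a sync.WaitGroup, and falls back to serial traversal whenever no token is free. The following is a self-contained sketch of that pattern only; the walker type shown here, its visit callback, and the "." starting directory are assumptions for illustration, and the symlink, skip, and error-logging logic of the real code is omitted.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"sync"
)

// walker bounds concurrent directory traversal with a channel semaphore:
// a goroutine is spawned only when a token is immediately available,
// otherwise the subtree is walked serially on the current goroutine.
type walker struct {
	sem     chan struct{}  // semaphore tokens; send to acquire, receive to release
	walking sync.WaitGroup // tracks spawned goroutines
	visit   func(dir string) // may be called concurrently
}

func (w *walker) walk(dir string) {
	w.visit(dir)
	ents, err := os.ReadDir(dir)
	if err != nil {
		return // errors are simply ignored in this sketch
	}
	for _, e := range ents {
		if !e.IsDir() {
			continue
		}
		next := filepath.Join(dir, e.Name())
		select {
		case w.sem <- struct{}{}:
			// Got a token: traverse this subdirectory concurrently.
			w.walking.Add(1)
			go func(dir string) {
				defer func() { <-w.sem; w.walking.Done() }()
				w.walk(dir)
			}(next)
		default:
			// No token available: traverse serially.
			w.walk(next)
		}
	}
}

func main() {
	w := &walker{
		sem:   make(chan struct{}, runtime.GOMAXPROCS(0)),
		visit: func(dir string) { fmt.Println(dir) },
	}
	w.sem <- struct{}{} // the initial call holds a token, as walkDir does
	w.walk(".")
	<-w.sem
	w.walking.Wait()
}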
People do things like put multiple - // main packages in the same directory, and we don't want to - // remove imports if they happen to have the same name as a var in - // a different package. - if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { - fixes = append(fixes, &ImportFix{ - StmtInfo: *imp, - IdentName: p.importIdentifier(imp), - FixType: DeleteImport, - }) - continue - } + for _, identifierImports := range p.existingImports { + for _, imp := range identifierImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } - // An existing import may need to update its import name to be correct. - if name := p.importSpecName(imp); name != imp.Name { - fixes = append(fixes, &ImportFix{ - StmtInfo: ImportInfo{ - Name: name, - ImportPath: imp.ImportPath, - }, - IdentName: p.importIdentifier(imp), - FixType: SetImportName, - }) + // An existing import may need to update its import name to be correct. + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } } } // Collecting fixes involved map iteration, so sort for stability. See @@ -508,9 +526,9 @@ func (p *pass) assumeSiblingImportsValid() { } for left, rights := range refs { if imp, ok := importsByName[left]; ok { - if m, ok := stdlib[imp.ImportPath]; ok { + if m, ok := stdlib.PackageSymbols[imp.ImportPath]; ok { // We have the stdlib in memory; no need to guess. - rights = copyExports(m) + rights = symbolNameSet(m) } p.addCandidate(imp, &packageInfo{ // no name; we already know it. @@ -638,7 +656,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena dupCheck := map[string]struct{}{} // Start off with the standard library. - for importPath, exports := range stdlib { + for importPath, symbols := range stdlib.PackageSymbols { p := &pkg{ dir: filepath.Join(goenv["GOROOT"], "src", importPath), importPathShort: importPath, @@ -647,6 +665,13 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena } dupCheck[importPath] = struct{}{} if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { + var exports []stdlib.Symbol + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Func, stdlib.Type, stdlib.Var, stdlib.Const: + exports = append(exports, sym) + } + } wrappedCallback.exportsLoaded(p, exports) } } @@ -667,7 +692,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena dupCheck[pkg.importPathShort] = struct{}{} return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) }, - exportsLoaded: func(pkg *pkg, exports []string) { + exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) { // If we're an x_test, load the package under test's test variant. 
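The withoutVersion helper introduced in the fix.go hunk above normalizes package names that end in a major-version path element, so that an import of math/rand/v2 is identified as rand. A short standalone restatement with a demo main follows; the sample inputs are illustrative only.

package main

import (
	"fmt"
	"path"
	"strconv"
)

// withoutVersion strips a trailing major-version element such as "v2"
// and returns the path element before it; other names pass through.
func withoutVersion(nm string) string {
	if v := path.Base(nm); len(v) > 0 && v[0] == 'v' {
		if _, err := strconv.Atoi(v[1:]); err == nil && len(v) < len(nm) {
			return path.Base(nm[:len(nm)-len(v)-1])
		}
	}
	return nm
}

func main() {
	fmt.Println(withoutVersion("rand/v2"))      // rand
	fmt.Println(withoutVersion("math/rand/v2")) // rand
	fmt.Println(withoutVersion("rand"))         // rand (unchanged)
	fmt.Println(withoutVersion("vendor"))       // vendor (not a version suffix)
}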
if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { var err error @@ -698,20 +723,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map return result, nil } -func PrimeCache(ctx context.Context, env *ProcessEnv) error { +func PrimeCache(ctx context.Context, resolver Resolver) error { // Fully scan the disk for directories, but don't actually read any Go files. callback := &scanCallback{ - rootFound: func(gopathwalk.Root) bool { - return true + rootFound: func(root gopathwalk.Root) bool { + // See getCandidatePkgs: walking GOROOT is apparently expensive and + // unnecessary. + return root.Type != gopathwalk.RootGOROOT }, dirFound: func(pkg *pkg) bool { return false }, - packageNameLoaded: func(pkg *pkg) bool { - return false - }, + // packageNameLoaded and exportsLoaded must never be called. } - return getCandidatePkgs(ctx, callback, "", "", env) + + return resolver.scan(ctx, callback) } func candidateImportName(pkg *pkg) string { @@ -791,7 +817,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, // A PackageExport is a package and its exports. type PackageExport struct { Fix *ImportFix - Exports []string + Exports []stdlib.Symbol } // GetPackageExports returns all known packages with name pkg and their exports. @@ -806,8 +832,8 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP packageNameLoaded: func(pkg *pkg) bool { return pkg.packageName == searchPkg }, - exportsLoaded: func(pkg *pkg, exports []string) { - sort.Strings(exports) + exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) { + sortSymbols(exports) wrapped(PackageExport{ Fix: &ImportFix{ StmtInfo: ImportInfo{ @@ -825,16 +851,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } -var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} +// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate +// imports when doing cross-platform development. +var requiredGoEnvVars = []string{ + "GO111MODULE", + "GOFLAGS", + "GOINSECURE", + "GOMOD", + "GOMODCACHE", + "GONOPROXY", + "GONOSUMDB", + "GOPATH", + "GOPROXY", + "GOROOT", + "GOSUMDB", + "GOWORK", +} // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. +// +// ...a ProcessEnv *also* overwrites its Env along with derived state in the +// form of the resolver. And because it is lazily initialized, an env may just +// be broken and unusable, but there is no way for the caller to detect that: +// all queries will just fail. +// +// TODO(rfindley): refactor this package so that this type (perhaps renamed to +// just Env or Config) is an immutable configuration struct, to be exchanged +// for an initialized object via a constructor that returns an error. Perhaps +// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where +// resolver is a concrete type used for resolving imports. Via this +// refactoring, we can avoid the need to call ProcessEnv.init and +// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where this +// these are misused. Also, we'd delegate the caller the decision of how to +// handle a broken environment. 
type ProcessEnv struct { GocmdRunner *gocommand.Runner BuildFlags []string ModFlag string - ModFile string // SkipPathInScan returns true if the path should be skipped from scans of // the RootCurrentModule root type. The function argument is a clean, @@ -844,7 +899,7 @@ type ProcessEnv struct { // Env overrides the OS environment, and can be used to specify // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because // exec.Command will not honor it. - // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + // Specifying all of requiredGoEnvVars avoids a call to `go env`. Env map[string]string WorkingDir string @@ -852,9 +907,17 @@ type ProcessEnv struct { // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) - initialized bool + // If set, ModCache holds a shared cache of directory info to use across + // multiple ProcessEnvs. + ModCache *DirInfoCache + + initialized bool // see TODO above - resolver Resolver + // resolver and resolverErr are lazily evaluated (see GetResolver). + // This is unclean, but see the big TODO in the docstring for ProcessEnv + // above: for now, we can't be sure that the ProcessEnv is fully initialized. + resolver Resolver + resolverErr error } func (e *ProcessEnv) goEnv() (map[string]string, error) { @@ -934,20 +997,33 @@ func (e *ProcessEnv) env() []string { } func (e *ProcessEnv) GetResolver() (Resolver, error) { - if e.resolver != nil { - return e.resolver, nil - } if err := e.init(); err != nil { return nil, err } - if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { - e.resolver = newGopathResolver(e) - return e.resolver, nil + + if e.resolver == nil && e.resolverErr == nil { + // TODO(rfindley): we should only use a gopathResolver here if the working + // directory is actually *in* GOPATH. (I seem to recall an open gopls issue + // for this behavior, but I can't find it). + // + // For gopls, we can optionally explicitly choose a resolver type, since we + // already know the view type. + if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + e.resolver = newGopathResolver(e) + } else if r, err := newModuleResolver(e, e.ModCache); err != nil { + e.resolverErr = err + } else { + e.resolver = Resolver(r) + } } - e.resolver = newModuleResolver(e) - return e.resolver, nil + + return e.resolver, e.resolverErr } +// buildContext returns the build.Context to use for matching files. +// +// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform +// development. func (e *ProcessEnv) buildContext() (*build.Context, error) { ctx := build.Default goenv, err := e.goEnv() @@ -997,24 +1073,40 @@ func addStdlibCandidates(pass *pass, refs references) error { if err != nil { return err } + localbase := func(nm string) string { + ans := path.Base(nm) + if ans[0] == 'v' { + // this is called, for instance, with math/rand/v2 and returns rand/v2 + if _, err := strconv.Atoi(ans[1:]); err == nil { + ix := strings.LastIndex(nm, ans) + more := path.Base(nm[:ix]) + ans = path.Join(more, ans) + } + } + return ans + } add := func(pkg string) { // Prevent self-imports. 
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { return } - exports := copyExports(stdlib[pkg]) + exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, - &packageInfo{name: path.Base(pkg), exports: exports}) + &packageInfo{name: localbase(pkg), exports: exports}) } for left := range refs { if left == "rand" { - // Make sure we try crypto/rand before math/rand. + // Make sure we try crypto/rand before any version of math/rand as both have Int() + // and our policy is to recommend crypto add("crypto/rand") - add("math/rand") + // if the user's no later than go1.21, this should be "math/rand" + // but we have no way of figuring out what the user is using + // TODO: investigate using the toolchain version to disambiguate in the stdlib + add("math/rand/v2") continue } - for importPath := range stdlib { + for importPath := range stdlib.PackageSymbols { if path.Base(importPath) == left { add(importPath) } @@ -1027,15 +1119,23 @@ func addStdlibCandidates(pass *pass, refs references) error { type Resolver interface { // loadPackageNames loads the package names in importPaths. loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) + // scan works with callback to search for packages. See scanCallback for details. scan(ctx context.Context, callback *scanCallback) error + // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. - loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) + // scoreImportPath returns the relevance for an import path. scoreImportPath(ctx context.Context, path string) float64 - ClearForNewScan() + // ClearForNewScan returns a new Resolver based on the receiver that has + // cleared its internal caches of directory contents. + // + // The new resolver should be primed and then set via + // [ProcessEnv.UpdateResolver]. + ClearForNewScan() Resolver } // A scanCallback controls a call to scan and receives its results. @@ -1054,7 +1154,7 @@ type scanCallback struct { // If it returns true, the package's exports will be loaded. packageNameLoaded func(pkg *pkg) bool // exportsLoaded is called when a package's exports have been loaded. - exportsLoaded func(pkg *pkg, exports []string) + exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) } func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { @@ -1118,7 +1218,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols) if err != nil { firstErrOnce.Do(func() { @@ -1149,6 +1249,17 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil }() for result := range results { + // Don't offer completions that would shadow predeclared + // names, such as github.com/coreos/etcd/error. + if types.Universe.Lookup(result.pkg.name) != nil { // predeclared + // Ideally we would skip this candidate only + // if the predeclared name is actually + // referenced by the file, but that's a lot + // trickier to compute and would still create + // an import that is likely to surprise the + // user before long. 
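The new candidate filter above skips import suggestions whose package name would shadow a predeclared identifier (the github.com/coreos/etcd/error example), by looking the name up in go/types' universe scope. A minimal runnable check of that idea; the isPredeclared wrapper and the sample names are assumptions added for the demo.

package main

import (
	"fmt"
	"go/types"
)

// isPredeclared reports whether name resolves in the universe scope,
// i.e. it is a predeclared type, constant, or builtin such as error or len.
func isPredeclared(name string) bool {
	return types.Universe.Lookup(name) != nil
}

func main() {
	for _, name := range []string{"error", "string", "len", "errors", "rand"} {
		fmt.Printf("%-7s predeclared: %v\n", name, isPredeclared(name))
	}
}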
+ continue + } pass.addCandidate(result.imp, result.pkg) } return firstErr @@ -1191,31 +1302,22 @@ func ImportPathToAssumedName(importPath string) string { type gopathResolver struct { env *ProcessEnv walked bool - cache *dirInfoCache + cache *DirInfoCache scanSema chan struct{} // scanSema prevents concurrent scans. } func newGopathResolver(env *ProcessEnv) *gopathResolver { r := &gopathResolver{ - env: env, - cache: &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - }, + env: env, + cache: NewDirInfoCache(), scanSema: make(chan struct{}, 1), } r.scanSema <- struct{}{} return r } -func (r *gopathResolver) ClearForNewScan() { - <-r.scanSema - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } - r.walked = false - r.scanSema <- struct{}{} +func (r *gopathResolver) ClearForNewScan() Resolver { + return newGopathResolver(r.env) } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { @@ -1233,7 +1335,7 @@ func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) ( // importPathToName finds out the actual package name, as declared in its .go files. func importPathToName(bctx *build.Context, importPath, srcDir string) string { // Fast path for standard library without going to disk. - if _, ok := stdlib[importPath]; ok { + if stdlib.HasPackage(importPath) { return path.Base(importPath) // stdlib packages always match their paths. } @@ -1431,7 +1533,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error } func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { - if _, ok := stdlib[path]; ok { + if stdlib.HasPackage(path) { return MaxRelevance } return MaxRelevance - 1 @@ -1448,7 +1550,7 @@ func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) [] return result } -func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { return r.cache.CacheExports(ctx, r.env, info) } @@ -1468,7 +1570,7 @@ func VendorlessPath(ipath string) string { return ipath } -func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []stdlib.Symbol, error) { // Look for non-test, buildable .go files which could provide exports. 
all, err := os.ReadDir(dir) if err != nil { @@ -1492,7 +1594,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } var pkgName string - var exports []string + var exports []stdlib.Symbol fset := token.NewFileSet() for _, fi := range files { select { @@ -1519,24 +1621,44 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl continue } pkgName = f.Name.Name - for name := range f.Scope.Objects { + for name, obj := range f.Scope.Objects { if ast.IsExported(name) { - exports = append(exports, name) + var kind stdlib.Kind + switch obj.Kind { + case ast.Con: + kind = stdlib.Const + case ast.Typ: + kind = stdlib.Type + case ast.Var: + kind = stdlib.Var + case ast.Fun: + kind = stdlib.Func + } + exports = append(exports, stdlib.Symbol{ + Name: name, + Kind: kind, + Version: 0, // unknown; be permissive + }) } } } + sortSymbols(exports) if env.Logf != nil { - sortedExports := append([]string(nil), exports...) - sort.Strings(sortedExports) - env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) + env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) } return pkgName, exports, nil } +func sortSymbols(syms []stdlib.Symbol) { + sort.Slice(syms, func(i, j int) bool { + return syms[i].Name < syms[j].Name + }) +} + // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so @@ -1600,7 +1722,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa exportsMap := make(map[string]bool, len(exports)) for _, sym := range exports { - exportsMap[sym] = true + exportsMap[sym.Name] = true } // If it doesn't have the right @@ -1758,10 +1880,13 @@ func (fn visitFn) Visit(node ast.Node) ast.Visitor { return fn(node) } -func copyExports(pkg []string) map[string]bool { - m := make(map[string]bool, len(pkg)) - for _, v := range pkg { - m[v] = true +func symbolNameSet(symbols []stdlib.Symbol) map[string]bool { + names := make(map[string]bool) + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Const, stdlib.Var, stdlib.Type, stdlib.Func: + names[sym.Name] = true + } } - return m + return names } diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 58e637b9..f8346552 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run mkstdlib.go - // Package imports implements a Go pretty-printer (like package "go/format") // that also adds or removes import statements as necessary. package imports @@ -109,7 +107,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e } // formatFile formats the file syntax tree. -// It may mutate the token.FileSet. +// It may mutate the token.FileSet and the ast.File. 
// // If an adjust function is provided, it is called after formatting // with the original source (formatFile's src parameter) and the @@ -236,7 +234,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast src = src[:len(src)-len("}\n")] // Gofmt has also indented the function body one level. // Remove that indent. - src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1) + src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n")) return matchSpace(orig, src) } return file, adjust, nil diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 5f4d435d..82fe644a 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -21,78 +21,138 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" ) -// ModuleResolver implements resolver for modules using the go command as little -// as feasible. +// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning +// as fast as possible, which is desirable for a call to goimports from the +// command line, but it doesn't work as well for gopls, where it suffers from +// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216), +// both caused by populating the cache, albeit in slightly different ways. +// +// A high level list of TODOs: +// - Optimize the scan itself, as there is some redundancy statting and +// reading go.mod files. +// - Invert the relationship between ProcessEnv and Resolver (see the +// docstring of ProcessEnv). +// - Make it easier to use an external resolver implementation. +// +// Smaller TODOs are annotated in the code below. + +// ModuleResolver implements the Resolver interface for a workspace using +// modules. +// +// A goal of the ModuleResolver is to invoke the Go command as little as +// possible. To this end, it runs the Go command only for listing module +// information (i.e. `go list -m -e -json ...`). Package scanning, the process +// of loading package information for the modules, is implemented internally +// via the scan method. +// +// It has two types of state: the state derived from the go command, which +// is populated by init, and the state derived from scans, which is populated +// via scan. A root is considered scanned if it has been walked to discover +// directories. However, if the scan did not require additional information +// from the directory (such as package name or exports), the directory +// information itself may be partially populated. It will be lazily filled in +// as needed by scans, using the scanCallback. type ModuleResolver struct { - env *ProcessEnv - moduleCacheDir string - dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. - roots []gopathwalk.Root - scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. - scannedRoots map[gopathwalk.Root]bool - - initialized bool - mains []*gocommand.ModuleJSON - mainByDir map[string]*gocommand.ModuleJSON - modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir. - - // moduleCacheCache stores information about the module cache. 
- moduleCacheCache *dirInfoCache - otherCache *dirInfoCache + env *ProcessEnv + + // Module state, populated during construction + dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory + moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset + roots []gopathwalk.Root // roots to scan, in approximate order of importance + mains []*gocommand.ModuleJSON // main modules + mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots + modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path + modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir. + + // Scanning state, populated by scan + + // scanSema prevents concurrent scans, and guards scannedRoots and the cache + // fields below (though the caches themselves are concurrency safe). + // Receive to acquire, send to release. + scanSema chan struct{} + scannedRoots map[gopathwalk.Root]bool // if true, root has been walked + + // Caches of directory info, populated by scans and scan callbacks + // + // moduleCacheCache stores cached information about roots in the module + // cache, which are immutable and therefore do not need to be invalidated. + // + // otherCache stores information about all other roots (even GOROOT), which + // may change. + moduleCacheCache *DirInfoCache + otherCache *DirInfoCache } -func newModuleResolver(e *ProcessEnv) *ModuleResolver { +// newModuleResolver returns a new module-aware goimports resolver. +// +// Note: use caution when modifying this constructor: changes must also be +// reflected in ModuleResolver.ClearForNewScan. +func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) { r := &ModuleResolver{ env: e, scanSema: make(chan struct{}, 1), } - r.scanSema <- struct{}{} - return r -} - -func (r *ModuleResolver) init() error { - if r.initialized { - return nil - } + r.scanSema <- struct{}{} // release goenv, err := r.env.goEnv() if err != nil { - return err + return nil, err } + + // TODO(rfindley): can we refactor to share logic with r.env.invokeGo? inv := gocommand.Invocation{ BuildFlags: r.env.BuildFlags, ModFlag: r.env.ModFlag, - ModFile: r.env.ModFile, Env: r.env.env(), Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, } vendorEnabled := false - var mainModVendor *gocommand.ModuleJSON - - // Module vendor directories are ignored in workspace mode: - // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md - if len(r.env.Env["GOWORK"]) == 0 { + var mainModVendor *gocommand.ModuleJSON // for module vendoring + var mainModsVendor []*gocommand.ModuleJSON // for workspace vendoring + + goWork := r.env.Env["GOWORK"] + if len(goWork) == 0 { + // TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but + // they should be available from the ProcessEnv. Can we avoid the redundant + // invocation? vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) if err != nil { - return err + return nil, err + } + } else { + vendorEnabled, mainModsVendor, err = gocommand.WorkspaceVendorEnabled(context.Background(), inv, r.env.GocmdRunner) + if err != nil { + return nil, err } } - if mainModVendor != nil && vendorEnabled { - // Vendor mode is on, so all the non-Main modules are irrelevant, - // and we need to search /vendor for everything. 
- r.mains = []*gocommand.ModuleJSON{mainModVendor} - r.dummyVendorMod = &gocommand.ModuleJSON{ - Path: "", - Dir: filepath.Join(mainModVendor.Dir, "vendor"), + if vendorEnabled { + if mainModVendor != nil { + // Module vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = []*gocommand.ModuleJSON{mainModVendor} + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(mainModVendor.Dir, "vendor"), + } + r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} + } else { + // Workspace vendor mode is on, so all the non-Main modules are irrelevant, + // and we need to search /vendor for everything. + r.mains = mainModsVendor + r.dummyVendorMod = &gocommand.ModuleJSON{ + Path: "", + Dir: filepath.Join(filepath.Dir(goWork), "vendor"), + } + r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) + r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod) } - r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} - r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. err := r.initAllMods() @@ -100,19 +160,14 @@ func (r *ModuleResolver) init() error { // GO111MODULE=on. Other errors are fatal. if err != nil { if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") { - return err + return nil, err } } } - if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { - r.moduleCacheDir = gmc - } else { - gopaths := filepath.SplitList(goenv["GOPATH"]) - if len(gopaths) == 0 { - return fmt.Errorf("empty GOPATH") - } - r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") + r.moduleCacheDir = gomodcacheForEnv(goenv) + if r.moduleCacheDir == "" { + return nil, fmt.Errorf("cannot resolve GOMODCACHE") } sort.Slice(r.modsByModPath, func(i, j int) bool { @@ -128,8 +183,9 @@ func (r *ModuleResolver) init() error { return count(j) < count(i) // descending order }) - r.roots = []gopathwalk.Root{ - {Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}, + r.roots = []gopathwalk.Root{} + if goenv["GOROOT"] != "" { // "" happens in tests + r.roots = append(r.roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT}) } r.mainByDir = make(map[string]*gocommand.ModuleJSON) for _, main := range r.mains { @@ -141,7 +197,11 @@ func (r *ModuleResolver) init() error { } else { addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { - // This is redundant with the cache, but we'll skip it cheaply enough. + // This is redundant with the cache, but we'll skip it cheaply enough + // when we encounter it in the module cache scan. + // + // Including it at a lower index in r.roots than the module cache dir + // helps prioritize matches from within existing dependencies. r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache}) } else { r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther}) @@ -158,24 +218,40 @@ func (r *ModuleResolver) init() error { addDep(mod) } } + // If provided, share the moduleCacheCache. + // + // TODO(rfindley): The module cache is immutable. However, the loaded + // exports do depend on GOOS and GOARCH. 
Fortunately, the + // ProcessEnv.buildContext does not adjust these from build.DefaultContext + // (even though it should). So for now, this is OK to share, but we need to + // add logic for handling GOOS/GOARCH. + r.moduleCacheCache = moduleCacheCache r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache}) } r.scannedRoots = map[gopathwalk.Root]bool{} if r.moduleCacheCache == nil { - r.moduleCacheCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } - } - if r.otherCache == nil { - r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, - } + r.moduleCacheCache = NewDirInfoCache() } - r.initialized = true - return nil + r.otherCache = NewDirInfoCache() + return r, nil +} + +// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env +// map, which must have GOMODCACHE and GOPATH populated. +// +// TODO(rfindley): this is defensive refactoring. +// 1. Is this even relevant anymore? Can't we just read GOMODCACHE. +// 2. Use this to separate module cache scanning from other scanning. +func gomodcacheForEnv(goenv map[string]string) string { + if gmc := goenv["GOMODCACHE"]; gmc != "" { + return gmc + } + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return "" + } + return filepath.Join(gopaths[0], "/pkg/mod") } func (r *ModuleResolver) initAllMods() error { @@ -206,30 +282,86 @@ func (r *ModuleResolver) initAllMods() error { return nil } -func (r *ModuleResolver) ClearForNewScan() { - <-r.scanSema - r.scannedRoots = map[gopathwalk.Root]bool{} - r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - listeners: map[*int]cacheListener{}, +// ClearForNewScan invalidates the last scan. +// +// It preserves the set of roots, but forgets about the set of directories. +// Though it forgets the set of module cache directories, it remembers their +// contents, since they are assumed to be immutable. +func (r *ModuleResolver) ClearForNewScan() Resolver { + <-r.scanSema // acquire r, to guard scannedRoots + r2 := &ModuleResolver{ + env: r.env, + dummyVendorMod: r.dummyVendorMod, + moduleCacheDir: r.moduleCacheDir, + roots: r.roots, + mains: r.mains, + mainByDir: r.mainByDir, + modsByModPath: r.modsByModPath, + + scanSema: make(chan struct{}, 1), + scannedRoots: make(map[gopathwalk.Root]bool), + otherCache: NewDirInfoCache(), + moduleCacheCache: r.moduleCacheCache, + } + r2.scanSema <- struct{}{} // r2 must start released + // Invalidate root scans. We don't need to invalidate module cache roots, + // because they are immutable. + // (We don't support a use case where GOMODCACHE is cleaned in the middle of + // e.g. a gopls session: the user must restart gopls to get accurate + // imports.) + // + // Scanning for new directories in GOMODCACHE should be handled elsewhere, + // via a call to ScanModuleCache. + for _, root := range r.roots { + if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] { + r2.scannedRoots[root] = true + } } - r.scanSema <- struct{}{} + r.scanSema <- struct{}{} // release r + return r2 } -func (r *ModuleResolver) ClearForNewMod() { - <-r.scanSema - *r = ModuleResolver{ - env: r.env, - moduleCacheCache: r.moduleCacheCache, - otherCache: r.otherCache, - scanSema: r.scanSema, +// ClearModuleInfo invalidates resolver state that depends on go.mod file +// contents (essentially, the output of go list -m -json ...). 
+// +// Notably, it does not forget directory contents, which are reset +// asynchronously via ClearForNewScan. +// +// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op. +// +// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods. +func (e *ProcessEnv) ClearModuleInfo() { + if r, ok := e.resolver.(*ModuleResolver); ok { + resolver, err := newModuleResolver(e, e.ModCache) + if err != nil { + e.resolver = nil + e.resolverErr = err + return + } + + <-r.scanSema // acquire (guards caches) + resolver.moduleCacheCache = r.moduleCacheCache + resolver.otherCache = r.otherCache + r.scanSema <- struct{}{} // release + + e.UpdateResolver(resolver) } - r.init() - r.scanSema <- struct{}{} } -// findPackage returns the module and directory that contains the package at -// the given import path, or returns nil, "" if no module is in scope. +// UpdateResolver sets the resolver for the ProcessEnv to use in imports +// operations. Only for use with the result of [Resolver.ClearForNewScan]. +// +// TODO(rfindley): this awkward API is a result of the (arguably) inverted +// relationship between configuration and state described in the doc comment +// for [ProcessEnv]. +func (e *ProcessEnv) UpdateResolver(r Resolver) { + e.resolver = r + e.resolverErr = nil +} + +// findPackage returns the module and directory from within the main modules +// and their dependencies that contains the package at the given import path, +// or returns nil, "" if no module is in scope. func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. @@ -295,10 +427,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) { } } -func (r *ModuleResolver) cacheKeys() []string { - return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...) -} - // cachePackageName caches the package name for a dir already in the cache. 
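The gomodcacheForEnv helper added earlier in this file derives the module cache location from the go environment: GOMODCACHE when set, otherwise the first GOPATH entry plus pkg/mod. A standalone sketch of that fallback, with hypothetical environment maps in the demo:

package main

import (
	"fmt"
	"path/filepath"
)

// gomodcacheForEnv prefers an explicit GOMODCACHE and falls back to
// <first GOPATH element>/pkg/mod, returning "" if neither is available.
func gomodcacheForEnv(goenv map[string]string) string {
	if gmc := goenv["GOMODCACHE"]; gmc != "" {
		return gmc
	}
	gopaths := filepath.SplitList(goenv["GOPATH"])
	if len(gopaths) == 0 {
		return ""
	}
	return filepath.Join(gopaths[0], "pkg", "mod")
}

func main() {
	fmt.Println(gomodcacheForEnv(map[string]string{"GOMODCACHE": "/cache/mod"}))  // /cache/mod
	fmt.Println(gomodcacheForEnv(map[string]string{"GOPATH": "/home/user/go"}))   // /home/user/go/pkg/mod
	fmt.Println(gomodcacheForEnv(map[string]string{}))                            // "" (cannot resolve)
}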
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { @@ -307,7 +435,7 @@ func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, er return r.otherCache.CachePackageName(info) } -func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { +func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) { if info.rootType == gopathwalk.RootModuleCache { return r.moduleCacheCache.CacheExports(ctx, env, info) } @@ -367,15 +495,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON return modDir != mod.Dir } -func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { - readModName := func(modFile string) string { - modBytes, err := os.ReadFile(modFile) - if err != nil { - return "" - } - return modulePath(modBytes) +func readModName(modFile string) string { + modBytes, err := os.ReadFile(modFile) + if err != nil { + return "" } + return modulePath(modBytes) +} +func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) { if r.dirInModuleCache(dir) { if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 { index := strings.Index(dir, matches[1]+"@"+matches[2]) @@ -409,11 +537,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool { } func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - if err := r.init(); err != nil { - return nil, err - } names := map[string]string{} for _, path := range importPaths { + // TODO(rfindley): shouldn't this use the dirInfoCache? _, packageDir := r.findPackage(path) if packageDir == "" { continue @@ -431,10 +557,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") defer done() - if err := r.init(); err != nil { - return err - } - processDir := func(info directoryPackageInfo) { // Skip this directory if we were not able to get the package information successfully. if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { @@ -444,18 +566,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error if err != nil { return } - if !callback.dirFound(pkg) { return } + pkg.packageName, err = r.cachePackageName(info) if err != nil { return } - if !callback.packageNameLoaded(pkg) { return } + _, exports, err := r.loadExports(ctx, pkg, false) if err != nil { return @@ -494,7 +616,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error return packageScanned } - // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { r.cacheStore(r.scanDirForPackage(root, dir)) } @@ -509,9 +630,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error select { case <-ctx.Done(): return - case <-r.scanSema: + case <-r.scanSema: // acquire } - defer func() { r.scanSema <- struct{}{} }() + defer func() { r.scanSema <- struct{}{} }() // release // We have the lock on r.scannedRoots, and no other scans can run. 
for _, root := range roots { if ctx.Err() != nil { @@ -534,7 +655,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error } func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { - if _, ok := stdlib[path]; ok { + if stdlib.HasPackage(path) { return MaxRelevance } mod, _ := r.findPackage(path) @@ -612,10 +733,7 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { return res, nil } -func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { - if err := r.init(); err != nil { - return "", nil, err - } +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index 45690abb..b1192696 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -7,9 +7,14 @@ package imports import ( "context" "fmt" + "path" + "path/filepath" + "strings" "sync" + "golang.org/x/mod/module" "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" ) // To find packages to import, the resolver needs to know about all of @@ -39,6 +44,8 @@ const ( exportsLoaded ) +// directoryPackageInfo holds (possibly incomplete) information about packages +// contained in a given directory. type directoryPackageInfo struct { // status indicates the extent to which this struct has been filled in. status directoryPackageStatus @@ -63,8 +70,11 @@ type directoryPackageInfo struct { packageName string // the package name, as declared in the source. // Set when status >= exportsLoaded. - - exports []string + // TODO(rfindley): it's hard to see this, but exports depend implicitly on + // the default build context GOOS and GOARCH. + // + // We can make this explicit, and key exports by GOOS, GOARCH. + exports []stdlib.Symbol } // reachedStatus returns true when info has a status at least target and any error associated with @@ -79,7 +89,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( return true, nil } -// dirInfoCache is a concurrency safe map for storing information about +// DirInfoCache is a concurrency-safe map for storing information about // directories that may contain packages. // // The information in this cache is built incrementally. Entries are initialized in scan. @@ -92,21 +102,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( // The information in the cache is not expected to change for the cache's // lifetime, so there is no protection against competing writes. Users should // take care not to hold the cache across changes to the underlying files. -// -// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc) -type dirInfoCache struct { +type DirInfoCache struct { mu sync.Mutex // dirs stores information about packages in directories, keyed by absolute path. 
dirs map[string]*directoryPackageInfo listeners map[*int]cacheListener } +func NewDirInfoCache() *DirInfoCache { + return &DirInfoCache{ + dirs: make(map[string]*directoryPackageInfo), + listeners: make(map[*int]cacheListener), + } +} + type cacheListener func(directoryPackageInfo) // ScanAndListen calls listener on all the items in the cache, and on anything // newly added. The returned stop function waits for all in-flight callbacks to // finish and blocks new ones. -func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { +func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { ctx, cancel := context.WithCancel(ctx) // Flushing out all the callbacks is tricky without knowing how many there @@ -162,8 +177,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener } // Store stores the package info for dir. -func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { +func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) { d.mu.Lock() + // TODO(rfindley, golang/go#59216): should we overwrite an existing entry? + // That seems incorrect as the cache should be idempotent. _, old := d.dirs[dir] d.dirs[dir] = &info var listeners []cacheListener @@ -180,7 +197,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { } // Load returns a copy of the directoryPackageInfo for absolute directory dir. -func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { +func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) { d.mu.Lock() defer d.mu.Unlock() info, ok := d.dirs[dir] @@ -191,7 +208,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) { } // Keys returns the keys currently present in d. -func (d *dirInfoCache) Keys() (keys []string) { +func (d *DirInfoCache) Keys() (keys []string) { d.mu.Lock() defer d.mu.Unlock() for key := range d.dirs { @@ -200,7 +217,7 @@ func (d *dirInfoCache) Keys() (keys []string) { return keys } -func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { +func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { if loaded, err := info.reachedStatus(nameLoaded); loaded { return info.packageName, err } @@ -213,7 +230,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro return info.packageName, info.err } -func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { +func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) { if reached, _ := info.reachedStatus(exportsLoaded); reached { return info.packageName, info.exports, info.err } @@ -234,3 +251,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d d.Store(info.dir, info) return info.packageName, info.exports, info.err } + +// ScanModuleCache walks the given directory, which must be a GOMODCACHE value, +// for directory package information, storing the results in cache. +func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) { + // Note(rfindley): it's hard to see, but this function attempts to implement + // just the side effects on cache of calling PrimeCache with a ProcessEnv + // that has the given dir as its GOMODCACHE. 
+ // + // Teasing out the control flow, we see that we can avoid any handling of + // vendor/ and can infer module info entirely from the path, simplifying the + // logic here. + + root := gopathwalk.Root{ + Path: filepath.Clean(dir), + Type: gopathwalk.RootModuleCache, + } + + directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo { + // This is a copy of ModuleResolver.scanDirForPackage, trimmed down to + // logic that applies to a module cache directory. + + subdir := "" + if dir != root.Path { + subdir = dir[len(root.Path)+len("/"):] + } + + matches := modCacheRegexp.FindStringSubmatch(subdir) + if len(matches) == 0 { + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("invalid module cache path: %v", subdir), + } + } + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) + if err != nil { + if logf != nil { + logf("decoding module cache path %q: %v", subdir, err) + } + return directoryPackageInfo{ + status: directoryScanned, + err: fmt.Errorf("decoding module cache path %q: %v", subdir, err), + } + } + importPath := path.Join(modPath, filepath.ToSlash(matches[3])) + index := strings.Index(dir, matches[1]+"@"+matches[2]) + modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2]) + modName := readModName(filepath.Join(modDir, "go.mod")) + return directoryPackageInfo{ + status: directoryScanned, + dir: dir, + rootType: root.Type, + nonCanonicalImportPath: importPath, + moduleDir: modDir, + moduleName: modName, + } + } + + add := func(root gopathwalk.Root, dir string) { + info := directoryInfo(root, dir) + cache.Store(info.dir, info) + } + + skip := func(_ gopathwalk.Root, dir string) bool { + // Skip directories that have already been scanned. + // + // Note that gopathwalk only adds "package" directories, which must contain + // a .go file, and all such package directories in the module cache are + // immutable. So if we can load a dir, it can be skipped. + info, ok := cache.Load(dir) + if !ok { + return false + } + packageScanned, _ := info.reachedStatus(directoryScanned) + return packageScanned + } + + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true}) +} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index 1a0a7ebd..da8194fd 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -18,7 +18,7 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. // -// It may mutate the token.File. +// It may mutate the token.File and the ast.File. func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go deleted file mode 100644 index 9f992c2b..00000000 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ /dev/null @@ -1,11345 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by mkstdlib.go. DO NOT EDIT. 
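The new ScanModuleCache above infers module information purely from module-cache directory names, unescaping the module path with golang.org/x/mod/module. Below is a simplified sketch of that decoding step: plain string splitting stands in for the package's modCacheRegexp (not shown in this hunk), and the example path is hypothetical.

package main

import (
	"fmt"
	"path"
	"strings"

	"golang.org/x/mod/module"
)

// decodeModCachePath splits a module-cache-relative directory such as
// "example.com/!some!module@v1.2.3/sub/pkg" into the unescaped module path
// and the import path of the package directory.
func decodeModCachePath(subdir string) (modPath, importPath string, err error) {
	at := strings.Index(subdir, "@")
	if at < 0 {
		return "", "", fmt.Errorf("not a module cache path: %q", subdir)
	}
	escaped := subdir[:at]
	rest := subdir[at+1:] // "v1.2.3" or "v1.2.3/sub/pkg"
	var pkgSub string
	if i := strings.Index(rest, "/"); i >= 0 {
		pkgSub = rest[i+1:]
	}
	modPath, err = module.UnescapePath(escaped)
	if err != nil {
		return "", "", err
	}
	return modPath, path.Join(modPath, pkgSub), nil
}

func main() {
	mod, imp, err := decodeModCachePath("example.com/!some!module@v1.2.3/sub/pkg")
	if err != nil {
		panic(err)
	}
	fmt.Println(mod) // example.com/SomeModule
	fmt.Println(imp) // example.com/SomeModule/sub/pkg
}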
- -package imports - -var stdlib = map[string][]string{ - "archive/tar": { - "ErrFieldTooLong", - "ErrHeader", - "ErrInsecurePath", - "ErrWriteAfterClose", - "ErrWriteTooLong", - "FileInfoHeader", - "Format", - "FormatGNU", - "FormatPAX", - "FormatUSTAR", - "FormatUnknown", - "Header", - "NewReader", - "NewWriter", - "Reader", - "TypeBlock", - "TypeChar", - "TypeCont", - "TypeDir", - "TypeFifo", - "TypeGNULongLink", - "TypeGNULongName", - "TypeGNUSparse", - "TypeLink", - "TypeReg", - "TypeRegA", - "TypeSymlink", - "TypeXGlobalHeader", - "TypeXHeader", - "Writer", - }, - "archive/zip": { - "Compressor", - "Decompressor", - "Deflate", - "ErrAlgorithm", - "ErrChecksum", - "ErrFormat", - "ErrInsecurePath", - "File", - "FileHeader", - "FileInfoHeader", - "NewReader", - "NewWriter", - "OpenReader", - "ReadCloser", - "Reader", - "RegisterCompressor", - "RegisterDecompressor", - "Store", - "Writer", - }, - "bufio": { - "ErrAdvanceTooFar", - "ErrBadReadCount", - "ErrBufferFull", - "ErrFinalToken", - "ErrInvalidUnreadByte", - "ErrInvalidUnreadRune", - "ErrNegativeAdvance", - "ErrNegativeCount", - "ErrTooLong", - "MaxScanTokenSize", - "NewReadWriter", - "NewReader", - "NewReaderSize", - "NewScanner", - "NewWriter", - "NewWriterSize", - "ReadWriter", - "Reader", - "ScanBytes", - "ScanLines", - "ScanRunes", - "ScanWords", - "Scanner", - "SplitFunc", - "Writer", - }, - "bytes": { - "Buffer", - "Clone", - "Compare", - "Contains", - "ContainsAny", - "ContainsFunc", - "ContainsRune", - "Count", - "Cut", - "CutPrefix", - "CutSuffix", - "Equal", - "EqualFold", - "ErrTooLarge", - "Fields", - "FieldsFunc", - "HasPrefix", - "HasSuffix", - "Index", - "IndexAny", - "IndexByte", - "IndexFunc", - "IndexRune", - "Join", - "LastIndex", - "LastIndexAny", - "LastIndexByte", - "LastIndexFunc", - "Map", - "MinRead", - "NewBuffer", - "NewBufferString", - "NewReader", - "Reader", - "Repeat", - "Replace", - "ReplaceAll", - "Runes", - "Split", - "SplitAfter", - "SplitAfterN", - "SplitN", - "Title", - "ToLower", - "ToLowerSpecial", - "ToTitle", - "ToTitleSpecial", - "ToUpper", - "ToUpperSpecial", - "ToValidUTF8", - "Trim", - "TrimFunc", - "TrimLeft", - "TrimLeftFunc", - "TrimPrefix", - "TrimRight", - "TrimRightFunc", - "TrimSpace", - "TrimSuffix", - }, - "cmp": { - "Compare", - "Less", - "Ordered", - }, - "compress/bzip2": { - "NewReader", - "StructuralError", - }, - "compress/flate": { - "BestCompression", - "BestSpeed", - "CorruptInputError", - "DefaultCompression", - "HuffmanOnly", - "InternalError", - "NewReader", - "NewReaderDict", - "NewWriter", - "NewWriterDict", - "NoCompression", - "ReadError", - "Reader", - "Resetter", - "WriteError", - "Writer", - }, - "compress/gzip": { - "BestCompression", - "BestSpeed", - "DefaultCompression", - "ErrChecksum", - "ErrHeader", - "Header", - "HuffmanOnly", - "NewReader", - "NewWriter", - "NewWriterLevel", - "NoCompression", - "Reader", - "Writer", - }, - "compress/lzw": { - "LSB", - "MSB", - "NewReader", - "NewWriter", - "Order", - "Reader", - "Writer", - }, - "compress/zlib": { - "BestCompression", - "BestSpeed", - "DefaultCompression", - "ErrChecksum", - "ErrDictionary", - "ErrHeader", - "HuffmanOnly", - "NewReader", - "NewReaderDict", - "NewWriter", - "NewWriterLevel", - "NewWriterLevelDict", - "NoCompression", - "Resetter", - "Writer", - }, - "container/heap": { - "Fix", - "Init", - "Interface", - "Pop", - "Push", - "Remove", - }, - "container/list": { - "Element", - "List", - "New", - }, - "container/ring": { - "New", - "Ring", - }, - "context": { - "AfterFunc", - 
"Background", - "CancelCauseFunc", - "CancelFunc", - "Canceled", - "Cause", - "Context", - "DeadlineExceeded", - "TODO", - "WithCancel", - "WithCancelCause", - "WithDeadline", - "WithDeadlineCause", - "WithTimeout", - "WithTimeoutCause", - "WithValue", - "WithoutCancel", - }, - "crypto": { - "BLAKE2b_256", - "BLAKE2b_384", - "BLAKE2b_512", - "BLAKE2s_256", - "Decrypter", - "DecrypterOpts", - "Hash", - "MD4", - "MD5", - "MD5SHA1", - "PrivateKey", - "PublicKey", - "RIPEMD160", - "RegisterHash", - "SHA1", - "SHA224", - "SHA256", - "SHA384", - "SHA3_224", - "SHA3_256", - "SHA3_384", - "SHA3_512", - "SHA512", - "SHA512_224", - "SHA512_256", - "Signer", - "SignerOpts", - }, - "crypto/aes": { - "BlockSize", - "KeySizeError", - "NewCipher", - }, - "crypto/cipher": { - "AEAD", - "Block", - "BlockMode", - "NewCBCDecrypter", - "NewCBCEncrypter", - "NewCFBDecrypter", - "NewCFBEncrypter", - "NewCTR", - "NewGCM", - "NewGCMWithNonceSize", - "NewGCMWithTagSize", - "NewOFB", - "Stream", - "StreamReader", - "StreamWriter", - }, - "crypto/des": { - "BlockSize", - "KeySizeError", - "NewCipher", - "NewTripleDESCipher", - }, - "crypto/dsa": { - "ErrInvalidPublicKey", - "GenerateKey", - "GenerateParameters", - "L1024N160", - "L2048N224", - "L2048N256", - "L3072N256", - "ParameterSizes", - "Parameters", - "PrivateKey", - "PublicKey", - "Sign", - "Verify", - }, - "crypto/ecdh": { - "Curve", - "P256", - "P384", - "P521", - "PrivateKey", - "PublicKey", - "X25519", - }, - "crypto/ecdsa": { - "GenerateKey", - "PrivateKey", - "PublicKey", - "Sign", - "SignASN1", - "Verify", - "VerifyASN1", - }, - "crypto/ed25519": { - "GenerateKey", - "NewKeyFromSeed", - "Options", - "PrivateKey", - "PrivateKeySize", - "PublicKey", - "PublicKeySize", - "SeedSize", - "Sign", - "SignatureSize", - "Verify", - "VerifyWithOptions", - }, - "crypto/elliptic": { - "Curve", - "CurveParams", - "GenerateKey", - "Marshal", - "MarshalCompressed", - "P224", - "P256", - "P384", - "P521", - "Unmarshal", - "UnmarshalCompressed", - }, - "crypto/hmac": { - "Equal", - "New", - }, - "crypto/md5": { - "BlockSize", - "New", - "Size", - "Sum", - }, - "crypto/rand": { - "Int", - "Prime", - "Read", - "Reader", - }, - "crypto/rc4": { - "Cipher", - "KeySizeError", - "NewCipher", - }, - "crypto/rsa": { - "CRTValue", - "DecryptOAEP", - "DecryptPKCS1v15", - "DecryptPKCS1v15SessionKey", - "EncryptOAEP", - "EncryptPKCS1v15", - "ErrDecryption", - "ErrMessageTooLong", - "ErrVerification", - "GenerateKey", - "GenerateMultiPrimeKey", - "OAEPOptions", - "PKCS1v15DecryptOptions", - "PSSOptions", - "PSSSaltLengthAuto", - "PSSSaltLengthEqualsHash", - "PrecomputedValues", - "PrivateKey", - "PublicKey", - "SignPKCS1v15", - "SignPSS", - "VerifyPKCS1v15", - "VerifyPSS", - }, - "crypto/sha1": { - "BlockSize", - "New", - "Size", - "Sum", - }, - "crypto/sha256": { - "BlockSize", - "New", - "New224", - "Size", - "Size224", - "Sum224", - "Sum256", - }, - "crypto/sha512": { - "BlockSize", - "New", - "New384", - "New512_224", - "New512_256", - "Size", - "Size224", - "Size256", - "Size384", - "Sum384", - "Sum512", - "Sum512_224", - "Sum512_256", - }, - "crypto/subtle": { - "ConstantTimeByteEq", - "ConstantTimeCompare", - "ConstantTimeCopy", - "ConstantTimeEq", - "ConstantTimeLessOrEq", - "ConstantTimeSelect", - "XORBytes", - }, - "crypto/tls": { - "AlertError", - "Certificate", - "CertificateRequestInfo", - "CertificateVerificationError", - "CipherSuite", - "CipherSuiteName", - "CipherSuites", - "Client", - "ClientAuthType", - "ClientHelloInfo", - "ClientSessionCache", - 
"ClientSessionState", - "Config", - "Conn", - "ConnectionState", - "CurveID", - "CurveP256", - "CurveP384", - "CurveP521", - "Dial", - "DialWithDialer", - "Dialer", - "ECDSAWithP256AndSHA256", - "ECDSAWithP384AndSHA384", - "ECDSAWithP521AndSHA512", - "ECDSAWithSHA1", - "Ed25519", - "InsecureCipherSuites", - "Listen", - "LoadX509KeyPair", - "NewLRUClientSessionCache", - "NewListener", - "NewResumptionState", - "NoClientCert", - "PKCS1WithSHA1", - "PKCS1WithSHA256", - "PKCS1WithSHA384", - "PKCS1WithSHA512", - "PSSWithSHA256", - "PSSWithSHA384", - "PSSWithSHA512", - "ParseSessionState", - "QUICClient", - "QUICConfig", - "QUICConn", - "QUICEncryptionLevel", - "QUICEncryptionLevelApplication", - "QUICEncryptionLevelEarly", - "QUICEncryptionLevelHandshake", - "QUICEncryptionLevelInitial", - "QUICEvent", - "QUICEventKind", - "QUICHandshakeDone", - "QUICNoEvent", - "QUICRejectedEarlyData", - "QUICServer", - "QUICSessionTicketOptions", - "QUICSetReadSecret", - "QUICSetWriteSecret", - "QUICTransportParameters", - "QUICTransportParametersRequired", - "QUICWriteData", - "RecordHeaderError", - "RenegotiateFreelyAsClient", - "RenegotiateNever", - "RenegotiateOnceAsClient", - "RenegotiationSupport", - "RequestClientCert", - "RequireAndVerifyClientCert", - "RequireAnyClientCert", - "Server", - "SessionState", - "SignatureScheme", - "TLS_AES_128_GCM_SHA256", - "TLS_AES_256_GCM_SHA384", - "TLS_CHACHA20_POLY1305_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", - "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - "TLS_FALLBACK_SCSV", - "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - "TLS_RSA_WITH_AES_128_CBC_SHA", - "TLS_RSA_WITH_AES_128_CBC_SHA256", - "TLS_RSA_WITH_AES_128_GCM_SHA256", - "TLS_RSA_WITH_AES_256_CBC_SHA", - "TLS_RSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_RC4_128_SHA", - "VerifyClientCertIfGiven", - "VersionName", - "VersionSSL30", - "VersionTLS10", - "VersionTLS11", - "VersionTLS12", - "VersionTLS13", - "X25519", - "X509KeyPair", - }, - "crypto/x509": { - "CANotAuthorizedForExtKeyUsage", - "CANotAuthorizedForThisName", - "CertPool", - "Certificate", - "CertificateInvalidError", - "CertificateRequest", - "ConstraintViolationError", - "CreateCertificate", - "CreateCertificateRequest", - "CreateRevocationList", - "DSA", - "DSAWithSHA1", - "DSAWithSHA256", - "DecryptPEMBlock", - "ECDSA", - "ECDSAWithSHA1", - "ECDSAWithSHA256", - "ECDSAWithSHA384", - "ECDSAWithSHA512", - "Ed25519", - "EncryptPEMBlock", - "ErrUnsupportedAlgorithm", - "Expired", - "ExtKeyUsage", - "ExtKeyUsageAny", - "ExtKeyUsageClientAuth", - "ExtKeyUsageCodeSigning", - "ExtKeyUsageEmailProtection", - "ExtKeyUsageIPSECEndSystem", - "ExtKeyUsageIPSECTunnel", - "ExtKeyUsageIPSECUser", - "ExtKeyUsageMicrosoftCommercialCodeSigning", - "ExtKeyUsageMicrosoftKernelCodeSigning", - "ExtKeyUsageMicrosoftServerGatedCrypto", - "ExtKeyUsageNetscapeServerGatedCrypto", - "ExtKeyUsageOCSPSigning", - "ExtKeyUsageServerAuth", - 
"ExtKeyUsageTimeStamping", - "HostnameError", - "IncompatibleUsage", - "IncorrectPasswordError", - "InsecureAlgorithmError", - "InvalidReason", - "IsEncryptedPEMBlock", - "KeyUsage", - "KeyUsageCRLSign", - "KeyUsageCertSign", - "KeyUsageContentCommitment", - "KeyUsageDataEncipherment", - "KeyUsageDecipherOnly", - "KeyUsageDigitalSignature", - "KeyUsageEncipherOnly", - "KeyUsageKeyAgreement", - "KeyUsageKeyEncipherment", - "MD2WithRSA", - "MD5WithRSA", - "MarshalECPrivateKey", - "MarshalPKCS1PrivateKey", - "MarshalPKCS1PublicKey", - "MarshalPKCS8PrivateKey", - "MarshalPKIXPublicKey", - "NameConstraintsWithoutSANs", - "NameMismatch", - "NewCertPool", - "NotAuthorizedToSign", - "PEMCipher", - "PEMCipher3DES", - "PEMCipherAES128", - "PEMCipherAES192", - "PEMCipherAES256", - "PEMCipherDES", - "ParseCRL", - "ParseCertificate", - "ParseCertificateRequest", - "ParseCertificates", - "ParseDERCRL", - "ParseECPrivateKey", - "ParsePKCS1PrivateKey", - "ParsePKCS1PublicKey", - "ParsePKCS8PrivateKey", - "ParsePKIXPublicKey", - "ParseRevocationList", - "PublicKeyAlgorithm", - "PureEd25519", - "RSA", - "RevocationList", - "RevocationListEntry", - "SHA1WithRSA", - "SHA256WithRSA", - "SHA256WithRSAPSS", - "SHA384WithRSA", - "SHA384WithRSAPSS", - "SHA512WithRSA", - "SHA512WithRSAPSS", - "SetFallbackRoots", - "SignatureAlgorithm", - "SystemCertPool", - "SystemRootsError", - "TooManyConstraints", - "TooManyIntermediates", - "UnconstrainedName", - "UnhandledCriticalExtension", - "UnknownAuthorityError", - "UnknownPublicKeyAlgorithm", - "UnknownSignatureAlgorithm", - "VerifyOptions", - }, - "crypto/x509/pkix": { - "AlgorithmIdentifier", - "AttributeTypeAndValue", - "AttributeTypeAndValueSET", - "CertificateList", - "Extension", - "Name", - "RDNSequence", - "RelativeDistinguishedNameSET", - "RevokedCertificate", - "TBSCertificateList", - }, - "database/sql": { - "ColumnType", - "Conn", - "DB", - "DBStats", - "Drivers", - "ErrConnDone", - "ErrNoRows", - "ErrTxDone", - "IsolationLevel", - "LevelDefault", - "LevelLinearizable", - "LevelReadCommitted", - "LevelReadUncommitted", - "LevelRepeatableRead", - "LevelSerializable", - "LevelSnapshot", - "LevelWriteCommitted", - "Named", - "NamedArg", - "NullBool", - "NullByte", - "NullFloat64", - "NullInt16", - "NullInt32", - "NullInt64", - "NullString", - "NullTime", - "Open", - "OpenDB", - "Out", - "RawBytes", - "Register", - "Result", - "Row", - "Rows", - "Scanner", - "Stmt", - "Tx", - "TxOptions", - }, - "database/sql/driver": { - "Bool", - "ColumnConverter", - "Conn", - "ConnBeginTx", - "ConnPrepareContext", - "Connector", - "DefaultParameterConverter", - "Driver", - "DriverContext", - "ErrBadConn", - "ErrRemoveArgument", - "ErrSkip", - "Execer", - "ExecerContext", - "Int32", - "IsScanValue", - "IsValue", - "IsolationLevel", - "NamedValue", - "NamedValueChecker", - "NotNull", - "Null", - "Pinger", - "Queryer", - "QueryerContext", - "Result", - "ResultNoRows", - "Rows", - "RowsAffected", - "RowsColumnTypeDatabaseTypeName", - "RowsColumnTypeLength", - "RowsColumnTypeNullable", - "RowsColumnTypePrecisionScale", - "RowsColumnTypeScanType", - "RowsNextResultSet", - "SessionResetter", - "Stmt", - "StmtExecContext", - "StmtQueryContext", - "String", - "Tx", - "TxOptions", - "Validator", - "Value", - "ValueConverter", - "Valuer", - }, - "debug/buildinfo": { - "BuildInfo", - "Read", - "ReadFile", - }, - "debug/dwarf": { - "AddrType", - "ArrayType", - "Attr", - "AttrAbstractOrigin", - "AttrAccessibility", - "AttrAddrBase", - "AttrAddrClass", - "AttrAlignment", - "AttrAllocated", - 
"AttrArtificial", - "AttrAssociated", - "AttrBaseTypes", - "AttrBinaryScale", - "AttrBitOffset", - "AttrBitSize", - "AttrByteSize", - "AttrCallAllCalls", - "AttrCallAllSourceCalls", - "AttrCallAllTailCalls", - "AttrCallColumn", - "AttrCallDataLocation", - "AttrCallDataValue", - "AttrCallFile", - "AttrCallLine", - "AttrCallOrigin", - "AttrCallPC", - "AttrCallParameter", - "AttrCallReturnPC", - "AttrCallTailCall", - "AttrCallTarget", - "AttrCallTargetClobbered", - "AttrCallValue", - "AttrCalling", - "AttrCommonRef", - "AttrCompDir", - "AttrConstExpr", - "AttrConstValue", - "AttrContainingType", - "AttrCount", - "AttrDataBitOffset", - "AttrDataLocation", - "AttrDataMemberLoc", - "AttrDecimalScale", - "AttrDecimalSign", - "AttrDeclColumn", - "AttrDeclFile", - "AttrDeclLine", - "AttrDeclaration", - "AttrDefaultValue", - "AttrDefaulted", - "AttrDeleted", - "AttrDescription", - "AttrDigitCount", - "AttrDiscr", - "AttrDiscrList", - "AttrDiscrValue", - "AttrDwoName", - "AttrElemental", - "AttrEncoding", - "AttrEndianity", - "AttrEntrypc", - "AttrEnumClass", - "AttrExplicit", - "AttrExportSymbols", - "AttrExtension", - "AttrExternal", - "AttrFrameBase", - "AttrFriend", - "AttrHighpc", - "AttrIdentifierCase", - "AttrImport", - "AttrInline", - "AttrIsOptional", - "AttrLanguage", - "AttrLinkageName", - "AttrLocation", - "AttrLoclistsBase", - "AttrLowerBound", - "AttrLowpc", - "AttrMacroInfo", - "AttrMacros", - "AttrMainSubprogram", - "AttrMutable", - "AttrName", - "AttrNamelistItem", - "AttrNoreturn", - "AttrObjectPointer", - "AttrOrdering", - "AttrPictureString", - "AttrPriority", - "AttrProducer", - "AttrPrototyped", - "AttrPure", - "AttrRanges", - "AttrRank", - "AttrRecursive", - "AttrReference", - "AttrReturnAddr", - "AttrRnglistsBase", - "AttrRvalueReference", - "AttrSegment", - "AttrSibling", - "AttrSignature", - "AttrSmall", - "AttrSpecification", - "AttrStartScope", - "AttrStaticLink", - "AttrStmtList", - "AttrStrOffsetsBase", - "AttrStride", - "AttrStrideSize", - "AttrStringLength", - "AttrStringLengthBitSize", - "AttrStringLengthByteSize", - "AttrThreadsScaled", - "AttrTrampoline", - "AttrType", - "AttrUpperBound", - "AttrUseLocation", - "AttrUseUTF8", - "AttrVarParam", - "AttrVirtuality", - "AttrVisibility", - "AttrVtableElemLoc", - "BasicType", - "BoolType", - "CharType", - "Class", - "ClassAddrPtr", - "ClassAddress", - "ClassBlock", - "ClassConstant", - "ClassExprLoc", - "ClassFlag", - "ClassLinePtr", - "ClassLocList", - "ClassLocListPtr", - "ClassMacPtr", - "ClassRangeListPtr", - "ClassReference", - "ClassReferenceAlt", - "ClassReferenceSig", - "ClassRngList", - "ClassRngListsPtr", - "ClassStrOffsetsPtr", - "ClassString", - "ClassStringAlt", - "ClassUnknown", - "CommonType", - "ComplexType", - "Data", - "DecodeError", - "DotDotDotType", - "Entry", - "EnumType", - "EnumValue", - "ErrUnknownPC", - "Field", - "FloatType", - "FuncType", - "IntType", - "LineEntry", - "LineFile", - "LineReader", - "LineReaderPos", - "New", - "Offset", - "PtrType", - "QualType", - "Reader", - "StructField", - "StructType", - "Tag", - "TagAccessDeclaration", - "TagArrayType", - "TagAtomicType", - "TagBaseType", - "TagCallSite", - "TagCallSiteParameter", - "TagCatchDwarfBlock", - "TagClassType", - "TagCoarrayType", - "TagCommonDwarfBlock", - "TagCommonInclusion", - "TagCompileUnit", - "TagCondition", - "TagConstType", - "TagConstant", - "TagDwarfProcedure", - "TagDynamicType", - "TagEntryPoint", - "TagEnumerationType", - "TagEnumerator", - "TagFileType", - "TagFormalParameter", - "TagFriend", - 
"TagGenericSubrange", - "TagImmutableType", - "TagImportedDeclaration", - "TagImportedModule", - "TagImportedUnit", - "TagInheritance", - "TagInlinedSubroutine", - "TagInterfaceType", - "TagLabel", - "TagLexDwarfBlock", - "TagMember", - "TagModule", - "TagMutableType", - "TagNamelist", - "TagNamelistItem", - "TagNamespace", - "TagPackedType", - "TagPartialUnit", - "TagPointerType", - "TagPtrToMemberType", - "TagReferenceType", - "TagRestrictType", - "TagRvalueReferenceType", - "TagSetType", - "TagSharedType", - "TagSkeletonUnit", - "TagStringType", - "TagStructType", - "TagSubprogram", - "TagSubrangeType", - "TagSubroutineType", - "TagTemplateAlias", - "TagTemplateTypeParameter", - "TagTemplateValueParameter", - "TagThrownType", - "TagTryDwarfBlock", - "TagTypeUnit", - "TagTypedef", - "TagUnionType", - "TagUnspecifiedParameters", - "TagUnspecifiedType", - "TagVariable", - "TagVariant", - "TagVariantPart", - "TagVolatileType", - "TagWithStmt", - "Type", - "TypedefType", - "UcharType", - "UintType", - "UnspecifiedType", - "UnsupportedType", - "VoidType", - }, - "debug/elf": { - "ARM_MAGIC_TRAMP_NUMBER", - "COMPRESS_HIOS", - "COMPRESS_HIPROC", - "COMPRESS_LOOS", - "COMPRESS_LOPROC", - "COMPRESS_ZLIB", - "COMPRESS_ZSTD", - "Chdr32", - "Chdr64", - "Class", - "CompressionType", - "DF_1_CONFALT", - "DF_1_DIRECT", - "DF_1_DISPRELDNE", - "DF_1_DISPRELPND", - "DF_1_EDITED", - "DF_1_ENDFILTEE", - "DF_1_GLOBAL", - "DF_1_GLOBAUDIT", - "DF_1_GROUP", - "DF_1_IGNMULDEF", - "DF_1_INITFIRST", - "DF_1_INTERPOSE", - "DF_1_KMOD", - "DF_1_LOADFLTR", - "DF_1_NOCOMMON", - "DF_1_NODEFLIB", - "DF_1_NODELETE", - "DF_1_NODIRECT", - "DF_1_NODUMP", - "DF_1_NOHDR", - "DF_1_NOKSYMS", - "DF_1_NOOPEN", - "DF_1_NORELOC", - "DF_1_NOW", - "DF_1_ORIGIN", - "DF_1_PIE", - "DF_1_SINGLETON", - "DF_1_STUB", - "DF_1_SYMINTPOSE", - "DF_1_TRANS", - "DF_1_WEAKFILTER", - "DF_BIND_NOW", - "DF_ORIGIN", - "DF_STATIC_TLS", - "DF_SYMBOLIC", - "DF_TEXTREL", - "DT_ADDRRNGHI", - "DT_ADDRRNGLO", - "DT_AUDIT", - "DT_AUXILIARY", - "DT_BIND_NOW", - "DT_CHECKSUM", - "DT_CONFIG", - "DT_DEBUG", - "DT_DEPAUDIT", - "DT_ENCODING", - "DT_FEATURE", - "DT_FILTER", - "DT_FINI", - "DT_FINI_ARRAY", - "DT_FINI_ARRAYSZ", - "DT_FLAGS", - "DT_FLAGS_1", - "DT_GNU_CONFLICT", - "DT_GNU_CONFLICTSZ", - "DT_GNU_HASH", - "DT_GNU_LIBLIST", - "DT_GNU_LIBLISTSZ", - "DT_GNU_PRELINKED", - "DT_HASH", - "DT_HIOS", - "DT_HIPROC", - "DT_INIT", - "DT_INIT_ARRAY", - "DT_INIT_ARRAYSZ", - "DT_JMPREL", - "DT_LOOS", - "DT_LOPROC", - "DT_MIPS_AUX_DYNAMIC", - "DT_MIPS_BASE_ADDRESS", - "DT_MIPS_COMPACT_SIZE", - "DT_MIPS_CONFLICT", - "DT_MIPS_CONFLICTNO", - "DT_MIPS_CXX_FLAGS", - "DT_MIPS_DELTA_CLASS", - "DT_MIPS_DELTA_CLASSSYM", - "DT_MIPS_DELTA_CLASSSYM_NO", - "DT_MIPS_DELTA_CLASS_NO", - "DT_MIPS_DELTA_INSTANCE", - "DT_MIPS_DELTA_INSTANCE_NO", - "DT_MIPS_DELTA_RELOC", - "DT_MIPS_DELTA_RELOC_NO", - "DT_MIPS_DELTA_SYM", - "DT_MIPS_DELTA_SYM_NO", - "DT_MIPS_DYNSTR_ALIGN", - "DT_MIPS_FLAGS", - "DT_MIPS_GOTSYM", - "DT_MIPS_GP_VALUE", - "DT_MIPS_HIDDEN_GOTIDX", - "DT_MIPS_HIPAGENO", - "DT_MIPS_ICHECKSUM", - "DT_MIPS_INTERFACE", - "DT_MIPS_INTERFACE_SIZE", - "DT_MIPS_IVERSION", - "DT_MIPS_LIBLIST", - "DT_MIPS_LIBLISTNO", - "DT_MIPS_LOCALPAGE_GOTIDX", - "DT_MIPS_LOCAL_GOTIDX", - "DT_MIPS_LOCAL_GOTNO", - "DT_MIPS_MSYM", - "DT_MIPS_OPTIONS", - "DT_MIPS_PERF_SUFFIX", - "DT_MIPS_PIXIE_INIT", - "DT_MIPS_PLTGOT", - "DT_MIPS_PROTECTED_GOTIDX", - "DT_MIPS_RLD_MAP", - "DT_MIPS_RLD_MAP_REL", - "DT_MIPS_RLD_TEXT_RESOLVE_ADDR", - "DT_MIPS_RLD_VERSION", - "DT_MIPS_RWPLT", - "DT_MIPS_SYMBOL_LIB", - 
"DT_MIPS_SYMTABNO", - "DT_MIPS_TIME_STAMP", - "DT_MIPS_UNREFEXTNO", - "DT_MOVEENT", - "DT_MOVESZ", - "DT_MOVETAB", - "DT_NEEDED", - "DT_NULL", - "DT_PLTGOT", - "DT_PLTPAD", - "DT_PLTPADSZ", - "DT_PLTREL", - "DT_PLTRELSZ", - "DT_POSFLAG_1", - "DT_PPC64_GLINK", - "DT_PPC64_OPD", - "DT_PPC64_OPDSZ", - "DT_PPC64_OPT", - "DT_PPC_GOT", - "DT_PPC_OPT", - "DT_PREINIT_ARRAY", - "DT_PREINIT_ARRAYSZ", - "DT_REL", - "DT_RELA", - "DT_RELACOUNT", - "DT_RELAENT", - "DT_RELASZ", - "DT_RELCOUNT", - "DT_RELENT", - "DT_RELSZ", - "DT_RPATH", - "DT_RUNPATH", - "DT_SONAME", - "DT_SPARC_REGISTER", - "DT_STRSZ", - "DT_STRTAB", - "DT_SYMBOLIC", - "DT_SYMENT", - "DT_SYMINENT", - "DT_SYMINFO", - "DT_SYMINSZ", - "DT_SYMTAB", - "DT_SYMTAB_SHNDX", - "DT_TEXTREL", - "DT_TLSDESC_GOT", - "DT_TLSDESC_PLT", - "DT_USED", - "DT_VALRNGHI", - "DT_VALRNGLO", - "DT_VERDEF", - "DT_VERDEFNUM", - "DT_VERNEED", - "DT_VERNEEDNUM", - "DT_VERSYM", - "Data", - "Dyn32", - "Dyn64", - "DynFlag", - "DynFlag1", - "DynTag", - "EI_ABIVERSION", - "EI_CLASS", - "EI_DATA", - "EI_NIDENT", - "EI_OSABI", - "EI_PAD", - "EI_VERSION", - "ELFCLASS32", - "ELFCLASS64", - "ELFCLASSNONE", - "ELFDATA2LSB", - "ELFDATA2MSB", - "ELFDATANONE", - "ELFMAG", - "ELFOSABI_86OPEN", - "ELFOSABI_AIX", - "ELFOSABI_ARM", - "ELFOSABI_AROS", - "ELFOSABI_CLOUDABI", - "ELFOSABI_FENIXOS", - "ELFOSABI_FREEBSD", - "ELFOSABI_HPUX", - "ELFOSABI_HURD", - "ELFOSABI_IRIX", - "ELFOSABI_LINUX", - "ELFOSABI_MODESTO", - "ELFOSABI_NETBSD", - "ELFOSABI_NONE", - "ELFOSABI_NSK", - "ELFOSABI_OPENBSD", - "ELFOSABI_OPENVMS", - "ELFOSABI_SOLARIS", - "ELFOSABI_STANDALONE", - "ELFOSABI_TRU64", - "EM_386", - "EM_486", - "EM_56800EX", - "EM_68HC05", - "EM_68HC08", - "EM_68HC11", - "EM_68HC12", - "EM_68HC16", - "EM_68K", - "EM_78KOR", - "EM_8051", - "EM_860", - "EM_88K", - "EM_960", - "EM_AARCH64", - "EM_ALPHA", - "EM_ALPHA_STD", - "EM_ALTERA_NIOS2", - "EM_AMDGPU", - "EM_ARC", - "EM_ARCA", - "EM_ARC_COMPACT", - "EM_ARC_COMPACT2", - "EM_ARM", - "EM_AVR", - "EM_AVR32", - "EM_BA1", - "EM_BA2", - "EM_BLACKFIN", - "EM_BPF", - "EM_C166", - "EM_CDP", - "EM_CE", - "EM_CLOUDSHIELD", - "EM_COGE", - "EM_COLDFIRE", - "EM_COOL", - "EM_COREA_1ST", - "EM_COREA_2ND", - "EM_CR", - "EM_CR16", - "EM_CRAYNV2", - "EM_CRIS", - "EM_CRX", - "EM_CSR_KALIMBA", - "EM_CUDA", - "EM_CYPRESS_M8C", - "EM_D10V", - "EM_D30V", - "EM_DSP24", - "EM_DSPIC30F", - "EM_DXP", - "EM_ECOG1", - "EM_ECOG16", - "EM_ECOG1X", - "EM_ECOG2", - "EM_ETPU", - "EM_EXCESS", - "EM_F2MC16", - "EM_FIREPATH", - "EM_FR20", - "EM_FR30", - "EM_FT32", - "EM_FX66", - "EM_H8S", - "EM_H8_300", - "EM_H8_300H", - "EM_H8_500", - "EM_HUANY", - "EM_IA_64", - "EM_INTEL205", - "EM_INTEL206", - "EM_INTEL207", - "EM_INTEL208", - "EM_INTEL209", - "EM_IP2K", - "EM_JAVELIN", - "EM_K10M", - "EM_KM32", - "EM_KMX16", - "EM_KMX32", - "EM_KMX8", - "EM_KVARC", - "EM_L10M", - "EM_LANAI", - "EM_LATTICEMICO32", - "EM_LOONGARCH", - "EM_M16C", - "EM_M32", - "EM_M32C", - "EM_M32R", - "EM_MANIK", - "EM_MAX", - "EM_MAXQ30", - "EM_MCHP_PIC", - "EM_MCST_ELBRUS", - "EM_ME16", - "EM_METAG", - "EM_MICROBLAZE", - "EM_MIPS", - "EM_MIPS_RS3_LE", - "EM_MIPS_RS4_BE", - "EM_MIPS_X", - "EM_MMA", - "EM_MMDSP_PLUS", - "EM_MMIX", - "EM_MN10200", - "EM_MN10300", - "EM_MOXIE", - "EM_MSP430", - "EM_NCPU", - "EM_NDR1", - "EM_NDS32", - "EM_NONE", - "EM_NORC", - "EM_NS32K", - "EM_OPEN8", - "EM_OPENRISC", - "EM_PARISC", - "EM_PCP", - "EM_PDP10", - "EM_PDP11", - "EM_PDSP", - "EM_PJ", - "EM_PPC", - "EM_PPC64", - "EM_PRISM", - "EM_QDSP6", - "EM_R32C", - "EM_RCE", - "EM_RH32", - "EM_RISCV", - "EM_RL78", - "EM_RS08", - 
"EM_RX", - "EM_S370", - "EM_S390", - "EM_SCORE7", - "EM_SEP", - "EM_SE_C17", - "EM_SE_C33", - "EM_SH", - "EM_SHARC", - "EM_SLE9X", - "EM_SNP1K", - "EM_SPARC", - "EM_SPARC32PLUS", - "EM_SPARCV9", - "EM_ST100", - "EM_ST19", - "EM_ST200", - "EM_ST7", - "EM_ST9PLUS", - "EM_STARCORE", - "EM_STM8", - "EM_STXP7X", - "EM_SVX", - "EM_TILE64", - "EM_TILEGX", - "EM_TILEPRO", - "EM_TINYJ", - "EM_TI_ARP32", - "EM_TI_C2000", - "EM_TI_C5500", - "EM_TI_C6000", - "EM_TI_PRU", - "EM_TMM_GPP", - "EM_TPC", - "EM_TRICORE", - "EM_TRIMEDIA", - "EM_TSK3000", - "EM_UNICORE", - "EM_V800", - "EM_V850", - "EM_VAX", - "EM_VIDEOCORE", - "EM_VIDEOCORE3", - "EM_VIDEOCORE5", - "EM_VISIUM", - "EM_VPP500", - "EM_X86_64", - "EM_XCORE", - "EM_XGATE", - "EM_XIMO16", - "EM_XTENSA", - "EM_Z80", - "EM_ZSP", - "ET_CORE", - "ET_DYN", - "ET_EXEC", - "ET_HIOS", - "ET_HIPROC", - "ET_LOOS", - "ET_LOPROC", - "ET_NONE", - "ET_REL", - "EV_CURRENT", - "EV_NONE", - "ErrNoSymbols", - "File", - "FileHeader", - "FormatError", - "Header32", - "Header64", - "ImportedSymbol", - "Machine", - "NT_FPREGSET", - "NT_PRPSINFO", - "NT_PRSTATUS", - "NType", - "NewFile", - "OSABI", - "Open", - "PF_MASKOS", - "PF_MASKPROC", - "PF_R", - "PF_W", - "PF_X", - "PT_AARCH64_ARCHEXT", - "PT_AARCH64_UNWIND", - "PT_ARM_ARCHEXT", - "PT_ARM_EXIDX", - "PT_DYNAMIC", - "PT_GNU_EH_FRAME", - "PT_GNU_MBIND_HI", - "PT_GNU_MBIND_LO", - "PT_GNU_PROPERTY", - "PT_GNU_RELRO", - "PT_GNU_STACK", - "PT_HIOS", - "PT_HIPROC", - "PT_INTERP", - "PT_LOAD", - "PT_LOOS", - "PT_LOPROC", - "PT_MIPS_ABIFLAGS", - "PT_MIPS_OPTIONS", - "PT_MIPS_REGINFO", - "PT_MIPS_RTPROC", - "PT_NOTE", - "PT_NULL", - "PT_OPENBSD_BOOTDATA", - "PT_OPENBSD_RANDOMIZE", - "PT_OPENBSD_WXNEEDED", - "PT_PAX_FLAGS", - "PT_PHDR", - "PT_S390_PGSTE", - "PT_SHLIB", - "PT_SUNWSTACK", - "PT_SUNW_EH_FRAME", - "PT_TLS", - "Prog", - "Prog32", - "Prog64", - "ProgFlag", - "ProgHeader", - "ProgType", - "R_386", - "R_386_16", - "R_386_32", - "R_386_32PLT", - "R_386_8", - "R_386_COPY", - "R_386_GLOB_DAT", - "R_386_GOT32", - "R_386_GOT32X", - "R_386_GOTOFF", - "R_386_GOTPC", - "R_386_IRELATIVE", - "R_386_JMP_SLOT", - "R_386_NONE", - "R_386_PC16", - "R_386_PC32", - "R_386_PC8", - "R_386_PLT32", - "R_386_RELATIVE", - "R_386_SIZE32", - "R_386_TLS_DESC", - "R_386_TLS_DESC_CALL", - "R_386_TLS_DTPMOD32", - "R_386_TLS_DTPOFF32", - "R_386_TLS_GD", - "R_386_TLS_GD_32", - "R_386_TLS_GD_CALL", - "R_386_TLS_GD_POP", - "R_386_TLS_GD_PUSH", - "R_386_TLS_GOTDESC", - "R_386_TLS_GOTIE", - "R_386_TLS_IE", - "R_386_TLS_IE_32", - "R_386_TLS_LDM", - "R_386_TLS_LDM_32", - "R_386_TLS_LDM_CALL", - "R_386_TLS_LDM_POP", - "R_386_TLS_LDM_PUSH", - "R_386_TLS_LDO_32", - "R_386_TLS_LE", - "R_386_TLS_LE_32", - "R_386_TLS_TPOFF", - "R_386_TLS_TPOFF32", - "R_390", - "R_390_12", - "R_390_16", - "R_390_20", - "R_390_32", - "R_390_64", - "R_390_8", - "R_390_COPY", - "R_390_GLOB_DAT", - "R_390_GOT12", - "R_390_GOT16", - "R_390_GOT20", - "R_390_GOT32", - "R_390_GOT64", - "R_390_GOTENT", - "R_390_GOTOFF", - "R_390_GOTOFF16", - "R_390_GOTOFF64", - "R_390_GOTPC", - "R_390_GOTPCDBL", - "R_390_GOTPLT12", - "R_390_GOTPLT16", - "R_390_GOTPLT20", - "R_390_GOTPLT32", - "R_390_GOTPLT64", - "R_390_GOTPLTENT", - "R_390_GOTPLTOFF16", - "R_390_GOTPLTOFF32", - "R_390_GOTPLTOFF64", - "R_390_JMP_SLOT", - "R_390_NONE", - "R_390_PC16", - "R_390_PC16DBL", - "R_390_PC32", - "R_390_PC32DBL", - "R_390_PC64", - "R_390_PLT16DBL", - "R_390_PLT32", - "R_390_PLT32DBL", - "R_390_PLT64", - "R_390_RELATIVE", - "R_390_TLS_DTPMOD", - "R_390_TLS_DTPOFF", - "R_390_TLS_GD32", - "R_390_TLS_GD64", - 
"R_390_TLS_GDCALL", - "R_390_TLS_GOTIE12", - "R_390_TLS_GOTIE20", - "R_390_TLS_GOTIE32", - "R_390_TLS_GOTIE64", - "R_390_TLS_IE32", - "R_390_TLS_IE64", - "R_390_TLS_IEENT", - "R_390_TLS_LDCALL", - "R_390_TLS_LDM32", - "R_390_TLS_LDM64", - "R_390_TLS_LDO32", - "R_390_TLS_LDO64", - "R_390_TLS_LE32", - "R_390_TLS_LE64", - "R_390_TLS_LOAD", - "R_390_TLS_TPOFF", - "R_AARCH64", - "R_AARCH64_ABS16", - "R_AARCH64_ABS32", - "R_AARCH64_ABS64", - "R_AARCH64_ADD_ABS_LO12_NC", - "R_AARCH64_ADR_GOT_PAGE", - "R_AARCH64_ADR_PREL_LO21", - "R_AARCH64_ADR_PREL_PG_HI21", - "R_AARCH64_ADR_PREL_PG_HI21_NC", - "R_AARCH64_CALL26", - "R_AARCH64_CONDBR19", - "R_AARCH64_COPY", - "R_AARCH64_GLOB_DAT", - "R_AARCH64_GOT_LD_PREL19", - "R_AARCH64_IRELATIVE", - "R_AARCH64_JUMP26", - "R_AARCH64_JUMP_SLOT", - "R_AARCH64_LD64_GOTOFF_LO15", - "R_AARCH64_LD64_GOTPAGE_LO15", - "R_AARCH64_LD64_GOT_LO12_NC", - "R_AARCH64_LDST128_ABS_LO12_NC", - "R_AARCH64_LDST16_ABS_LO12_NC", - "R_AARCH64_LDST32_ABS_LO12_NC", - "R_AARCH64_LDST64_ABS_LO12_NC", - "R_AARCH64_LDST8_ABS_LO12_NC", - "R_AARCH64_LD_PREL_LO19", - "R_AARCH64_MOVW_SABS_G0", - "R_AARCH64_MOVW_SABS_G1", - "R_AARCH64_MOVW_SABS_G2", - "R_AARCH64_MOVW_UABS_G0", - "R_AARCH64_MOVW_UABS_G0_NC", - "R_AARCH64_MOVW_UABS_G1", - "R_AARCH64_MOVW_UABS_G1_NC", - "R_AARCH64_MOVW_UABS_G2", - "R_AARCH64_MOVW_UABS_G2_NC", - "R_AARCH64_MOVW_UABS_G3", - "R_AARCH64_NONE", - "R_AARCH64_NULL", - "R_AARCH64_P32_ABS16", - "R_AARCH64_P32_ABS32", - "R_AARCH64_P32_ADD_ABS_LO12_NC", - "R_AARCH64_P32_ADR_GOT_PAGE", - "R_AARCH64_P32_ADR_PREL_LO21", - "R_AARCH64_P32_ADR_PREL_PG_HI21", - "R_AARCH64_P32_CALL26", - "R_AARCH64_P32_CONDBR19", - "R_AARCH64_P32_COPY", - "R_AARCH64_P32_GLOB_DAT", - "R_AARCH64_P32_GOT_LD_PREL19", - "R_AARCH64_P32_IRELATIVE", - "R_AARCH64_P32_JUMP26", - "R_AARCH64_P32_JUMP_SLOT", - "R_AARCH64_P32_LD32_GOT_LO12_NC", - "R_AARCH64_P32_LDST128_ABS_LO12_NC", - "R_AARCH64_P32_LDST16_ABS_LO12_NC", - "R_AARCH64_P32_LDST32_ABS_LO12_NC", - "R_AARCH64_P32_LDST64_ABS_LO12_NC", - "R_AARCH64_P32_LDST8_ABS_LO12_NC", - "R_AARCH64_P32_LD_PREL_LO19", - "R_AARCH64_P32_MOVW_SABS_G0", - "R_AARCH64_P32_MOVW_UABS_G0", - "R_AARCH64_P32_MOVW_UABS_G0_NC", - "R_AARCH64_P32_MOVW_UABS_G1", - "R_AARCH64_P32_PREL16", - "R_AARCH64_P32_PREL32", - "R_AARCH64_P32_RELATIVE", - "R_AARCH64_P32_TLSDESC", - "R_AARCH64_P32_TLSDESC_ADD_LO12_NC", - "R_AARCH64_P32_TLSDESC_ADR_PAGE21", - "R_AARCH64_P32_TLSDESC_ADR_PREL21", - "R_AARCH64_P32_TLSDESC_CALL", - "R_AARCH64_P32_TLSDESC_LD32_LO12_NC", - "R_AARCH64_P32_TLSDESC_LD_PREL19", - "R_AARCH64_P32_TLSGD_ADD_LO12_NC", - "R_AARCH64_P32_TLSGD_ADR_PAGE21", - "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", - "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", - "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", - "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", - "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", - "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", - "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", - "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", - "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", - "R_AARCH64_P32_TLS_DTPMOD", - "R_AARCH64_P32_TLS_DTPREL", - "R_AARCH64_P32_TLS_TPREL", - "R_AARCH64_P32_TSTBR14", - "R_AARCH64_PREL16", - "R_AARCH64_PREL32", - "R_AARCH64_PREL64", - "R_AARCH64_RELATIVE", - "R_AARCH64_TLSDESC", - "R_AARCH64_TLSDESC_ADD", - "R_AARCH64_TLSDESC_ADD_LO12_NC", - "R_AARCH64_TLSDESC_ADR_PAGE21", - "R_AARCH64_TLSDESC_ADR_PREL21", - "R_AARCH64_TLSDESC_CALL", - "R_AARCH64_TLSDESC_LD64_LO12_NC", - "R_AARCH64_TLSDESC_LDR", - "R_AARCH64_TLSDESC_LD_PREL19", - "R_AARCH64_TLSDESC_OFF_G0_NC", - "R_AARCH64_TLSDESC_OFF_G1", - 
"R_AARCH64_TLSGD_ADD_LO12_NC", - "R_AARCH64_TLSGD_ADR_PAGE21", - "R_AARCH64_TLSGD_ADR_PREL21", - "R_AARCH64_TLSGD_MOVW_G0_NC", - "R_AARCH64_TLSGD_MOVW_G1", - "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", - "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", - "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", - "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", - "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", - "R_AARCH64_TLSLD_ADR_PAGE21", - "R_AARCH64_TLSLD_ADR_PREL21", - "R_AARCH64_TLSLD_LDST128_DTPREL_LO12", - "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", - "R_AARCH64_TLSLE_ADD_TPREL_HI12", - "R_AARCH64_TLSLE_ADD_TPREL_LO12", - "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", - "R_AARCH64_TLSLE_LDST128_TPREL_LO12", - "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", - "R_AARCH64_TLSLE_MOVW_TPREL_G0", - "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", - "R_AARCH64_TLSLE_MOVW_TPREL_G1", - "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", - "R_AARCH64_TLSLE_MOVW_TPREL_G2", - "R_AARCH64_TLS_DTPMOD64", - "R_AARCH64_TLS_DTPREL64", - "R_AARCH64_TLS_TPREL64", - "R_AARCH64_TSTBR14", - "R_ALPHA", - "R_ALPHA_BRADDR", - "R_ALPHA_COPY", - "R_ALPHA_GLOB_DAT", - "R_ALPHA_GPDISP", - "R_ALPHA_GPREL32", - "R_ALPHA_GPRELHIGH", - "R_ALPHA_GPRELLOW", - "R_ALPHA_GPVALUE", - "R_ALPHA_HINT", - "R_ALPHA_IMMED_BR_HI32", - "R_ALPHA_IMMED_GP_16", - "R_ALPHA_IMMED_GP_HI32", - "R_ALPHA_IMMED_LO32", - "R_ALPHA_IMMED_SCN_HI32", - "R_ALPHA_JMP_SLOT", - "R_ALPHA_LITERAL", - "R_ALPHA_LITUSE", - "R_ALPHA_NONE", - "R_ALPHA_OP_PRSHIFT", - "R_ALPHA_OP_PSUB", - "R_ALPHA_OP_PUSH", - "R_ALPHA_OP_STORE", - "R_ALPHA_REFLONG", - "R_ALPHA_REFQUAD", - "R_ALPHA_RELATIVE", - "R_ALPHA_SREL16", - "R_ALPHA_SREL32", - "R_ALPHA_SREL64", - "R_ARM", - "R_ARM_ABS12", - "R_ARM_ABS16", - "R_ARM_ABS32", - "R_ARM_ABS32_NOI", - "R_ARM_ABS8", - "R_ARM_ALU_PCREL_15_8", - "R_ARM_ALU_PCREL_23_15", - "R_ARM_ALU_PCREL_7_0", - "R_ARM_ALU_PC_G0", - "R_ARM_ALU_PC_G0_NC", - "R_ARM_ALU_PC_G1", - "R_ARM_ALU_PC_G1_NC", - "R_ARM_ALU_PC_G2", - "R_ARM_ALU_SBREL_19_12_NC", - "R_ARM_ALU_SBREL_27_20_CK", - "R_ARM_ALU_SB_G0", - "R_ARM_ALU_SB_G0_NC", - "R_ARM_ALU_SB_G1", - "R_ARM_ALU_SB_G1_NC", - "R_ARM_ALU_SB_G2", - "R_ARM_AMP_VCALL9", - "R_ARM_BASE_ABS", - "R_ARM_CALL", - "R_ARM_COPY", - "R_ARM_GLOB_DAT", - "R_ARM_GNU_VTENTRY", - "R_ARM_GNU_VTINHERIT", - "R_ARM_GOT32", - "R_ARM_GOTOFF", - "R_ARM_GOTOFF12", - "R_ARM_GOTPC", - "R_ARM_GOTRELAX", - "R_ARM_GOT_ABS", - "R_ARM_GOT_BREL12", - "R_ARM_GOT_PREL", - "R_ARM_IRELATIVE", - "R_ARM_JUMP24", - "R_ARM_JUMP_SLOT", - "R_ARM_LDC_PC_G0", - "R_ARM_LDC_PC_G1", - "R_ARM_LDC_PC_G2", - "R_ARM_LDC_SB_G0", - "R_ARM_LDC_SB_G1", - "R_ARM_LDC_SB_G2", - "R_ARM_LDRS_PC_G0", - "R_ARM_LDRS_PC_G1", - "R_ARM_LDRS_PC_G2", - "R_ARM_LDRS_SB_G0", - "R_ARM_LDRS_SB_G1", - "R_ARM_LDRS_SB_G2", - "R_ARM_LDR_PC_G1", - "R_ARM_LDR_PC_G2", - "R_ARM_LDR_SBREL_11_10_NC", - "R_ARM_LDR_SB_G0", - "R_ARM_LDR_SB_G1", - "R_ARM_LDR_SB_G2", - "R_ARM_ME_TOO", - "R_ARM_MOVT_ABS", - "R_ARM_MOVT_BREL", - "R_ARM_MOVT_PREL", - "R_ARM_MOVW_ABS_NC", - "R_ARM_MOVW_BREL", - "R_ARM_MOVW_BREL_NC", - "R_ARM_MOVW_PREL_NC", - "R_ARM_NONE", - "R_ARM_PC13", - "R_ARM_PC24", - "R_ARM_PLT32", - "R_ARM_PLT32_ABS", - "R_ARM_PREL31", - "R_ARM_PRIVATE_0", - "R_ARM_PRIVATE_1", - "R_ARM_PRIVATE_10", - "R_ARM_PRIVATE_11", - "R_ARM_PRIVATE_12", - "R_ARM_PRIVATE_13", - "R_ARM_PRIVATE_14", - "R_ARM_PRIVATE_15", - "R_ARM_PRIVATE_2", - "R_ARM_PRIVATE_3", - "R_ARM_PRIVATE_4", - "R_ARM_PRIVATE_5", - "R_ARM_PRIVATE_6", - "R_ARM_PRIVATE_7", - "R_ARM_PRIVATE_8", - "R_ARM_PRIVATE_9", - "R_ARM_RABS32", - "R_ARM_RBASE", - "R_ARM_REL32", - "R_ARM_REL32_NOI", - "R_ARM_RELATIVE", - "R_ARM_RPC24", - 
"R_ARM_RREL32", - "R_ARM_RSBREL32", - "R_ARM_RXPC25", - "R_ARM_SBREL31", - "R_ARM_SBREL32", - "R_ARM_SWI24", - "R_ARM_TARGET1", - "R_ARM_TARGET2", - "R_ARM_THM_ABS5", - "R_ARM_THM_ALU_ABS_G0_NC", - "R_ARM_THM_ALU_ABS_G1_NC", - "R_ARM_THM_ALU_ABS_G2_NC", - "R_ARM_THM_ALU_ABS_G3", - "R_ARM_THM_ALU_PREL_11_0", - "R_ARM_THM_GOT_BREL12", - "R_ARM_THM_JUMP11", - "R_ARM_THM_JUMP19", - "R_ARM_THM_JUMP24", - "R_ARM_THM_JUMP6", - "R_ARM_THM_JUMP8", - "R_ARM_THM_MOVT_ABS", - "R_ARM_THM_MOVT_BREL", - "R_ARM_THM_MOVT_PREL", - "R_ARM_THM_MOVW_ABS_NC", - "R_ARM_THM_MOVW_BREL", - "R_ARM_THM_MOVW_BREL_NC", - "R_ARM_THM_MOVW_PREL_NC", - "R_ARM_THM_PC12", - "R_ARM_THM_PC22", - "R_ARM_THM_PC8", - "R_ARM_THM_RPC22", - "R_ARM_THM_SWI8", - "R_ARM_THM_TLS_CALL", - "R_ARM_THM_TLS_DESCSEQ16", - "R_ARM_THM_TLS_DESCSEQ32", - "R_ARM_THM_XPC22", - "R_ARM_TLS_CALL", - "R_ARM_TLS_DESCSEQ", - "R_ARM_TLS_DTPMOD32", - "R_ARM_TLS_DTPOFF32", - "R_ARM_TLS_GD32", - "R_ARM_TLS_GOTDESC", - "R_ARM_TLS_IE12GP", - "R_ARM_TLS_IE32", - "R_ARM_TLS_LDM32", - "R_ARM_TLS_LDO12", - "R_ARM_TLS_LDO32", - "R_ARM_TLS_LE12", - "R_ARM_TLS_LE32", - "R_ARM_TLS_TPOFF32", - "R_ARM_V4BX", - "R_ARM_XPC25", - "R_INFO", - "R_INFO32", - "R_LARCH", - "R_LARCH_32", - "R_LARCH_32_PCREL", - "R_LARCH_64", - "R_LARCH_ABS64_HI12", - "R_LARCH_ABS64_LO20", - "R_LARCH_ABS_HI20", - "R_LARCH_ABS_LO12", - "R_LARCH_ADD16", - "R_LARCH_ADD24", - "R_LARCH_ADD32", - "R_LARCH_ADD64", - "R_LARCH_ADD8", - "R_LARCH_B16", - "R_LARCH_B21", - "R_LARCH_B26", - "R_LARCH_COPY", - "R_LARCH_GNU_VTENTRY", - "R_LARCH_GNU_VTINHERIT", - "R_LARCH_GOT64_HI12", - "R_LARCH_GOT64_LO20", - "R_LARCH_GOT64_PC_HI12", - "R_LARCH_GOT64_PC_LO20", - "R_LARCH_GOT_HI20", - "R_LARCH_GOT_LO12", - "R_LARCH_GOT_PC_HI20", - "R_LARCH_GOT_PC_LO12", - "R_LARCH_IRELATIVE", - "R_LARCH_JUMP_SLOT", - "R_LARCH_MARK_LA", - "R_LARCH_MARK_PCREL", - "R_LARCH_NONE", - "R_LARCH_PCALA64_HI12", - "R_LARCH_PCALA64_LO20", - "R_LARCH_PCALA_HI20", - "R_LARCH_PCALA_LO12", - "R_LARCH_RELATIVE", - "R_LARCH_RELAX", - "R_LARCH_SOP_ADD", - "R_LARCH_SOP_AND", - "R_LARCH_SOP_ASSERT", - "R_LARCH_SOP_IF_ELSE", - "R_LARCH_SOP_NOT", - "R_LARCH_SOP_POP_32_S_0_10_10_16_S2", - "R_LARCH_SOP_POP_32_S_0_5_10_16_S2", - "R_LARCH_SOP_POP_32_S_10_12", - "R_LARCH_SOP_POP_32_S_10_16", - "R_LARCH_SOP_POP_32_S_10_16_S2", - "R_LARCH_SOP_POP_32_S_10_5", - "R_LARCH_SOP_POP_32_S_5_20", - "R_LARCH_SOP_POP_32_U", - "R_LARCH_SOP_POP_32_U_10_12", - "R_LARCH_SOP_PUSH_ABSOLUTE", - "R_LARCH_SOP_PUSH_DUP", - "R_LARCH_SOP_PUSH_GPREL", - "R_LARCH_SOP_PUSH_PCREL", - "R_LARCH_SOP_PUSH_PLT_PCREL", - "R_LARCH_SOP_PUSH_TLS_GD", - "R_LARCH_SOP_PUSH_TLS_GOT", - "R_LARCH_SOP_PUSH_TLS_TPREL", - "R_LARCH_SOP_SL", - "R_LARCH_SOP_SR", - "R_LARCH_SOP_SUB", - "R_LARCH_SUB16", - "R_LARCH_SUB24", - "R_LARCH_SUB32", - "R_LARCH_SUB64", - "R_LARCH_SUB8", - "R_LARCH_TLS_DTPMOD32", - "R_LARCH_TLS_DTPMOD64", - "R_LARCH_TLS_DTPREL32", - "R_LARCH_TLS_DTPREL64", - "R_LARCH_TLS_GD_HI20", - "R_LARCH_TLS_GD_PC_HI20", - "R_LARCH_TLS_IE64_HI12", - "R_LARCH_TLS_IE64_LO20", - "R_LARCH_TLS_IE64_PC_HI12", - "R_LARCH_TLS_IE64_PC_LO20", - "R_LARCH_TLS_IE_HI20", - "R_LARCH_TLS_IE_LO12", - "R_LARCH_TLS_IE_PC_HI20", - "R_LARCH_TLS_IE_PC_LO12", - "R_LARCH_TLS_LD_HI20", - "R_LARCH_TLS_LD_PC_HI20", - "R_LARCH_TLS_LE64_HI12", - "R_LARCH_TLS_LE64_LO20", - "R_LARCH_TLS_LE_HI20", - "R_LARCH_TLS_LE_LO12", - "R_LARCH_TLS_TPREL32", - "R_LARCH_TLS_TPREL64", - "R_MIPS", - "R_MIPS_16", - "R_MIPS_26", - "R_MIPS_32", - "R_MIPS_64", - "R_MIPS_ADD_IMMEDIATE", - "R_MIPS_CALL16", - "R_MIPS_CALL_HI16", - "R_MIPS_CALL_LO16", 
- "R_MIPS_DELETE", - "R_MIPS_GOT16", - "R_MIPS_GOT_DISP", - "R_MIPS_GOT_HI16", - "R_MIPS_GOT_LO16", - "R_MIPS_GOT_OFST", - "R_MIPS_GOT_PAGE", - "R_MIPS_GPREL16", - "R_MIPS_GPREL32", - "R_MIPS_HI16", - "R_MIPS_HIGHER", - "R_MIPS_HIGHEST", - "R_MIPS_INSERT_A", - "R_MIPS_INSERT_B", - "R_MIPS_JALR", - "R_MIPS_LITERAL", - "R_MIPS_LO16", - "R_MIPS_NONE", - "R_MIPS_PC16", - "R_MIPS_PJUMP", - "R_MIPS_REL16", - "R_MIPS_REL32", - "R_MIPS_RELGOT", - "R_MIPS_SCN_DISP", - "R_MIPS_SHIFT5", - "R_MIPS_SHIFT6", - "R_MIPS_SUB", - "R_MIPS_TLS_DTPMOD32", - "R_MIPS_TLS_DTPMOD64", - "R_MIPS_TLS_DTPREL32", - "R_MIPS_TLS_DTPREL64", - "R_MIPS_TLS_DTPREL_HI16", - "R_MIPS_TLS_DTPREL_LO16", - "R_MIPS_TLS_GD", - "R_MIPS_TLS_GOTTPREL", - "R_MIPS_TLS_LDM", - "R_MIPS_TLS_TPREL32", - "R_MIPS_TLS_TPREL64", - "R_MIPS_TLS_TPREL_HI16", - "R_MIPS_TLS_TPREL_LO16", - "R_PPC", - "R_PPC64", - "R_PPC64_ADDR14", - "R_PPC64_ADDR14_BRNTAKEN", - "R_PPC64_ADDR14_BRTAKEN", - "R_PPC64_ADDR16", - "R_PPC64_ADDR16_DS", - "R_PPC64_ADDR16_HA", - "R_PPC64_ADDR16_HI", - "R_PPC64_ADDR16_HIGH", - "R_PPC64_ADDR16_HIGHA", - "R_PPC64_ADDR16_HIGHER", - "R_PPC64_ADDR16_HIGHER34", - "R_PPC64_ADDR16_HIGHERA", - "R_PPC64_ADDR16_HIGHERA34", - "R_PPC64_ADDR16_HIGHEST", - "R_PPC64_ADDR16_HIGHEST34", - "R_PPC64_ADDR16_HIGHESTA", - "R_PPC64_ADDR16_HIGHESTA34", - "R_PPC64_ADDR16_LO", - "R_PPC64_ADDR16_LO_DS", - "R_PPC64_ADDR24", - "R_PPC64_ADDR32", - "R_PPC64_ADDR64", - "R_PPC64_ADDR64_LOCAL", - "R_PPC64_COPY", - "R_PPC64_D28", - "R_PPC64_D34", - "R_PPC64_D34_HA30", - "R_PPC64_D34_HI30", - "R_PPC64_D34_LO", - "R_PPC64_DTPMOD64", - "R_PPC64_DTPREL16", - "R_PPC64_DTPREL16_DS", - "R_PPC64_DTPREL16_HA", - "R_PPC64_DTPREL16_HI", - "R_PPC64_DTPREL16_HIGH", - "R_PPC64_DTPREL16_HIGHA", - "R_PPC64_DTPREL16_HIGHER", - "R_PPC64_DTPREL16_HIGHERA", - "R_PPC64_DTPREL16_HIGHEST", - "R_PPC64_DTPREL16_HIGHESTA", - "R_PPC64_DTPREL16_LO", - "R_PPC64_DTPREL16_LO_DS", - "R_PPC64_DTPREL34", - "R_PPC64_DTPREL64", - "R_PPC64_ENTRY", - "R_PPC64_GLOB_DAT", - "R_PPC64_GNU_VTENTRY", - "R_PPC64_GNU_VTINHERIT", - "R_PPC64_GOT16", - "R_PPC64_GOT16_DS", - "R_PPC64_GOT16_HA", - "R_PPC64_GOT16_HI", - "R_PPC64_GOT16_LO", - "R_PPC64_GOT16_LO_DS", - "R_PPC64_GOT_DTPREL16_DS", - "R_PPC64_GOT_DTPREL16_HA", - "R_PPC64_GOT_DTPREL16_HI", - "R_PPC64_GOT_DTPREL16_LO_DS", - "R_PPC64_GOT_DTPREL_PCREL34", - "R_PPC64_GOT_PCREL34", - "R_PPC64_GOT_TLSGD16", - "R_PPC64_GOT_TLSGD16_HA", - "R_PPC64_GOT_TLSGD16_HI", - "R_PPC64_GOT_TLSGD16_LO", - "R_PPC64_GOT_TLSGD_PCREL34", - "R_PPC64_GOT_TLSLD16", - "R_PPC64_GOT_TLSLD16_HA", - "R_PPC64_GOT_TLSLD16_HI", - "R_PPC64_GOT_TLSLD16_LO", - "R_PPC64_GOT_TLSLD_PCREL34", - "R_PPC64_GOT_TPREL16_DS", - "R_PPC64_GOT_TPREL16_HA", - "R_PPC64_GOT_TPREL16_HI", - "R_PPC64_GOT_TPREL16_LO_DS", - "R_PPC64_GOT_TPREL_PCREL34", - "R_PPC64_IRELATIVE", - "R_PPC64_JMP_IREL", - "R_PPC64_JMP_SLOT", - "R_PPC64_NONE", - "R_PPC64_PCREL28", - "R_PPC64_PCREL34", - "R_PPC64_PCREL_OPT", - "R_PPC64_PLT16_HA", - "R_PPC64_PLT16_HI", - "R_PPC64_PLT16_LO", - "R_PPC64_PLT16_LO_DS", - "R_PPC64_PLT32", - "R_PPC64_PLT64", - "R_PPC64_PLTCALL", - "R_PPC64_PLTCALL_NOTOC", - "R_PPC64_PLTGOT16", - "R_PPC64_PLTGOT16_DS", - "R_PPC64_PLTGOT16_HA", - "R_PPC64_PLTGOT16_HI", - "R_PPC64_PLTGOT16_LO", - "R_PPC64_PLTGOT_LO_DS", - "R_PPC64_PLTREL32", - "R_PPC64_PLTREL64", - "R_PPC64_PLTSEQ", - "R_PPC64_PLTSEQ_NOTOC", - "R_PPC64_PLT_PCREL34", - "R_PPC64_PLT_PCREL34_NOTOC", - "R_PPC64_REL14", - "R_PPC64_REL14_BRNTAKEN", - "R_PPC64_REL14_BRTAKEN", - "R_PPC64_REL16", - "R_PPC64_REL16DX_HA", - "R_PPC64_REL16_HA", - 
"R_PPC64_REL16_HI", - "R_PPC64_REL16_HIGH", - "R_PPC64_REL16_HIGHA", - "R_PPC64_REL16_HIGHER", - "R_PPC64_REL16_HIGHER34", - "R_PPC64_REL16_HIGHERA", - "R_PPC64_REL16_HIGHERA34", - "R_PPC64_REL16_HIGHEST", - "R_PPC64_REL16_HIGHEST34", - "R_PPC64_REL16_HIGHESTA", - "R_PPC64_REL16_HIGHESTA34", - "R_PPC64_REL16_LO", - "R_PPC64_REL24", - "R_PPC64_REL24_NOTOC", - "R_PPC64_REL24_P9NOTOC", - "R_PPC64_REL30", - "R_PPC64_REL32", - "R_PPC64_REL64", - "R_PPC64_RELATIVE", - "R_PPC64_SECTOFF", - "R_PPC64_SECTOFF_DS", - "R_PPC64_SECTOFF_HA", - "R_PPC64_SECTOFF_HI", - "R_PPC64_SECTOFF_LO", - "R_PPC64_SECTOFF_LO_DS", - "R_PPC64_TLS", - "R_PPC64_TLSGD", - "R_PPC64_TLSLD", - "R_PPC64_TOC", - "R_PPC64_TOC16", - "R_PPC64_TOC16_DS", - "R_PPC64_TOC16_HA", - "R_PPC64_TOC16_HI", - "R_PPC64_TOC16_LO", - "R_PPC64_TOC16_LO_DS", - "R_PPC64_TOCSAVE", - "R_PPC64_TPREL16", - "R_PPC64_TPREL16_DS", - "R_PPC64_TPREL16_HA", - "R_PPC64_TPREL16_HI", - "R_PPC64_TPREL16_HIGH", - "R_PPC64_TPREL16_HIGHA", - "R_PPC64_TPREL16_HIGHER", - "R_PPC64_TPREL16_HIGHERA", - "R_PPC64_TPREL16_HIGHEST", - "R_PPC64_TPREL16_HIGHESTA", - "R_PPC64_TPREL16_LO", - "R_PPC64_TPREL16_LO_DS", - "R_PPC64_TPREL34", - "R_PPC64_TPREL64", - "R_PPC64_UADDR16", - "R_PPC64_UADDR32", - "R_PPC64_UADDR64", - "R_PPC_ADDR14", - "R_PPC_ADDR14_BRNTAKEN", - "R_PPC_ADDR14_BRTAKEN", - "R_PPC_ADDR16", - "R_PPC_ADDR16_HA", - "R_PPC_ADDR16_HI", - "R_PPC_ADDR16_LO", - "R_PPC_ADDR24", - "R_PPC_ADDR32", - "R_PPC_COPY", - "R_PPC_DTPMOD32", - "R_PPC_DTPREL16", - "R_PPC_DTPREL16_HA", - "R_PPC_DTPREL16_HI", - "R_PPC_DTPREL16_LO", - "R_PPC_DTPREL32", - "R_PPC_EMB_BIT_FLD", - "R_PPC_EMB_MRKREF", - "R_PPC_EMB_NADDR16", - "R_PPC_EMB_NADDR16_HA", - "R_PPC_EMB_NADDR16_HI", - "R_PPC_EMB_NADDR16_LO", - "R_PPC_EMB_NADDR32", - "R_PPC_EMB_RELSDA", - "R_PPC_EMB_RELSEC16", - "R_PPC_EMB_RELST_HA", - "R_PPC_EMB_RELST_HI", - "R_PPC_EMB_RELST_LO", - "R_PPC_EMB_SDA21", - "R_PPC_EMB_SDA2I16", - "R_PPC_EMB_SDA2REL", - "R_PPC_EMB_SDAI16", - "R_PPC_GLOB_DAT", - "R_PPC_GOT16", - "R_PPC_GOT16_HA", - "R_PPC_GOT16_HI", - "R_PPC_GOT16_LO", - "R_PPC_GOT_TLSGD16", - "R_PPC_GOT_TLSGD16_HA", - "R_PPC_GOT_TLSGD16_HI", - "R_PPC_GOT_TLSGD16_LO", - "R_PPC_GOT_TLSLD16", - "R_PPC_GOT_TLSLD16_HA", - "R_PPC_GOT_TLSLD16_HI", - "R_PPC_GOT_TLSLD16_LO", - "R_PPC_GOT_TPREL16", - "R_PPC_GOT_TPREL16_HA", - "R_PPC_GOT_TPREL16_HI", - "R_PPC_GOT_TPREL16_LO", - "R_PPC_JMP_SLOT", - "R_PPC_LOCAL24PC", - "R_PPC_NONE", - "R_PPC_PLT16_HA", - "R_PPC_PLT16_HI", - "R_PPC_PLT16_LO", - "R_PPC_PLT32", - "R_PPC_PLTREL24", - "R_PPC_PLTREL32", - "R_PPC_REL14", - "R_PPC_REL14_BRNTAKEN", - "R_PPC_REL14_BRTAKEN", - "R_PPC_REL24", - "R_PPC_REL32", - "R_PPC_RELATIVE", - "R_PPC_SDAREL16", - "R_PPC_SECTOFF", - "R_PPC_SECTOFF_HA", - "R_PPC_SECTOFF_HI", - "R_PPC_SECTOFF_LO", - "R_PPC_TLS", - "R_PPC_TPREL16", - "R_PPC_TPREL16_HA", - "R_PPC_TPREL16_HI", - "R_PPC_TPREL16_LO", - "R_PPC_TPREL32", - "R_PPC_UADDR16", - "R_PPC_UADDR32", - "R_RISCV", - "R_RISCV_32", - "R_RISCV_32_PCREL", - "R_RISCV_64", - "R_RISCV_ADD16", - "R_RISCV_ADD32", - "R_RISCV_ADD64", - "R_RISCV_ADD8", - "R_RISCV_ALIGN", - "R_RISCV_BRANCH", - "R_RISCV_CALL", - "R_RISCV_CALL_PLT", - "R_RISCV_COPY", - "R_RISCV_GNU_VTENTRY", - "R_RISCV_GNU_VTINHERIT", - "R_RISCV_GOT_HI20", - "R_RISCV_GPREL_I", - "R_RISCV_GPREL_S", - "R_RISCV_HI20", - "R_RISCV_JAL", - "R_RISCV_JUMP_SLOT", - "R_RISCV_LO12_I", - "R_RISCV_LO12_S", - "R_RISCV_NONE", - "R_RISCV_PCREL_HI20", - "R_RISCV_PCREL_LO12_I", - "R_RISCV_PCREL_LO12_S", - "R_RISCV_RELATIVE", - "R_RISCV_RELAX", - "R_RISCV_RVC_BRANCH", - "R_RISCV_RVC_JUMP", - 
"R_RISCV_RVC_LUI", - "R_RISCV_SET16", - "R_RISCV_SET32", - "R_RISCV_SET6", - "R_RISCV_SET8", - "R_RISCV_SUB16", - "R_RISCV_SUB32", - "R_RISCV_SUB6", - "R_RISCV_SUB64", - "R_RISCV_SUB8", - "R_RISCV_TLS_DTPMOD32", - "R_RISCV_TLS_DTPMOD64", - "R_RISCV_TLS_DTPREL32", - "R_RISCV_TLS_DTPREL64", - "R_RISCV_TLS_GD_HI20", - "R_RISCV_TLS_GOT_HI20", - "R_RISCV_TLS_TPREL32", - "R_RISCV_TLS_TPREL64", - "R_RISCV_TPREL_ADD", - "R_RISCV_TPREL_HI20", - "R_RISCV_TPREL_I", - "R_RISCV_TPREL_LO12_I", - "R_RISCV_TPREL_LO12_S", - "R_RISCV_TPREL_S", - "R_SPARC", - "R_SPARC_10", - "R_SPARC_11", - "R_SPARC_13", - "R_SPARC_16", - "R_SPARC_22", - "R_SPARC_32", - "R_SPARC_5", - "R_SPARC_6", - "R_SPARC_64", - "R_SPARC_7", - "R_SPARC_8", - "R_SPARC_COPY", - "R_SPARC_DISP16", - "R_SPARC_DISP32", - "R_SPARC_DISP64", - "R_SPARC_DISP8", - "R_SPARC_GLOB_DAT", - "R_SPARC_GLOB_JMP", - "R_SPARC_GOT10", - "R_SPARC_GOT13", - "R_SPARC_GOT22", - "R_SPARC_H44", - "R_SPARC_HH22", - "R_SPARC_HI22", - "R_SPARC_HIPLT22", - "R_SPARC_HIX22", - "R_SPARC_HM10", - "R_SPARC_JMP_SLOT", - "R_SPARC_L44", - "R_SPARC_LM22", - "R_SPARC_LO10", - "R_SPARC_LOPLT10", - "R_SPARC_LOX10", - "R_SPARC_M44", - "R_SPARC_NONE", - "R_SPARC_OLO10", - "R_SPARC_PC10", - "R_SPARC_PC22", - "R_SPARC_PCPLT10", - "R_SPARC_PCPLT22", - "R_SPARC_PCPLT32", - "R_SPARC_PC_HH22", - "R_SPARC_PC_HM10", - "R_SPARC_PC_LM22", - "R_SPARC_PLT32", - "R_SPARC_PLT64", - "R_SPARC_REGISTER", - "R_SPARC_RELATIVE", - "R_SPARC_UA16", - "R_SPARC_UA32", - "R_SPARC_UA64", - "R_SPARC_WDISP16", - "R_SPARC_WDISP19", - "R_SPARC_WDISP22", - "R_SPARC_WDISP30", - "R_SPARC_WPLT30", - "R_SYM32", - "R_SYM64", - "R_TYPE32", - "R_TYPE64", - "R_X86_64", - "R_X86_64_16", - "R_X86_64_32", - "R_X86_64_32S", - "R_X86_64_64", - "R_X86_64_8", - "R_X86_64_COPY", - "R_X86_64_DTPMOD64", - "R_X86_64_DTPOFF32", - "R_X86_64_DTPOFF64", - "R_X86_64_GLOB_DAT", - "R_X86_64_GOT32", - "R_X86_64_GOT64", - "R_X86_64_GOTOFF64", - "R_X86_64_GOTPC32", - "R_X86_64_GOTPC32_TLSDESC", - "R_X86_64_GOTPC64", - "R_X86_64_GOTPCREL", - "R_X86_64_GOTPCREL64", - "R_X86_64_GOTPCRELX", - "R_X86_64_GOTPLT64", - "R_X86_64_GOTTPOFF", - "R_X86_64_IRELATIVE", - "R_X86_64_JMP_SLOT", - "R_X86_64_NONE", - "R_X86_64_PC16", - "R_X86_64_PC32", - "R_X86_64_PC32_BND", - "R_X86_64_PC64", - "R_X86_64_PC8", - "R_X86_64_PLT32", - "R_X86_64_PLT32_BND", - "R_X86_64_PLTOFF64", - "R_X86_64_RELATIVE", - "R_X86_64_RELATIVE64", - "R_X86_64_REX_GOTPCRELX", - "R_X86_64_SIZE32", - "R_X86_64_SIZE64", - "R_X86_64_TLSDESC", - "R_X86_64_TLSDESC_CALL", - "R_X86_64_TLSGD", - "R_X86_64_TLSLD", - "R_X86_64_TPOFF32", - "R_X86_64_TPOFF64", - "Rel32", - "Rel64", - "Rela32", - "Rela64", - "SHF_ALLOC", - "SHF_COMPRESSED", - "SHF_EXECINSTR", - "SHF_GROUP", - "SHF_INFO_LINK", - "SHF_LINK_ORDER", - "SHF_MASKOS", - "SHF_MASKPROC", - "SHF_MERGE", - "SHF_OS_NONCONFORMING", - "SHF_STRINGS", - "SHF_TLS", - "SHF_WRITE", - "SHN_ABS", - "SHN_COMMON", - "SHN_HIOS", - "SHN_HIPROC", - "SHN_HIRESERVE", - "SHN_LOOS", - "SHN_LOPROC", - "SHN_LORESERVE", - "SHN_UNDEF", - "SHN_XINDEX", - "SHT_DYNAMIC", - "SHT_DYNSYM", - "SHT_FINI_ARRAY", - "SHT_GNU_ATTRIBUTES", - "SHT_GNU_HASH", - "SHT_GNU_LIBLIST", - "SHT_GNU_VERDEF", - "SHT_GNU_VERNEED", - "SHT_GNU_VERSYM", - "SHT_GROUP", - "SHT_HASH", - "SHT_HIOS", - "SHT_HIPROC", - "SHT_HIUSER", - "SHT_INIT_ARRAY", - "SHT_LOOS", - "SHT_LOPROC", - "SHT_LOUSER", - "SHT_MIPS_ABIFLAGS", - "SHT_NOBITS", - "SHT_NOTE", - "SHT_NULL", - "SHT_PREINIT_ARRAY", - "SHT_PROGBITS", - "SHT_REL", - "SHT_RELA", - "SHT_SHLIB", - "SHT_STRTAB", - "SHT_SYMTAB", - "SHT_SYMTAB_SHNDX", - 
"STB_GLOBAL", - "STB_HIOS", - "STB_HIPROC", - "STB_LOCAL", - "STB_LOOS", - "STB_LOPROC", - "STB_WEAK", - "STT_COMMON", - "STT_FILE", - "STT_FUNC", - "STT_HIOS", - "STT_HIPROC", - "STT_LOOS", - "STT_LOPROC", - "STT_NOTYPE", - "STT_OBJECT", - "STT_SECTION", - "STT_TLS", - "STV_DEFAULT", - "STV_HIDDEN", - "STV_INTERNAL", - "STV_PROTECTED", - "ST_BIND", - "ST_INFO", - "ST_TYPE", - "ST_VISIBILITY", - "Section", - "Section32", - "Section64", - "SectionFlag", - "SectionHeader", - "SectionIndex", - "SectionType", - "Sym32", - "Sym32Size", - "Sym64", - "Sym64Size", - "SymBind", - "SymType", - "SymVis", - "Symbol", - "Type", - "Version", - }, - "debug/gosym": { - "DecodingError", - "Func", - "LineTable", - "NewLineTable", - "NewTable", - "Obj", - "Sym", - "Table", - "UnknownFileError", - "UnknownLineError", - }, - "debug/macho": { - "ARM64_RELOC_ADDEND", - "ARM64_RELOC_BRANCH26", - "ARM64_RELOC_GOT_LOAD_PAGE21", - "ARM64_RELOC_GOT_LOAD_PAGEOFF12", - "ARM64_RELOC_PAGE21", - "ARM64_RELOC_PAGEOFF12", - "ARM64_RELOC_POINTER_TO_GOT", - "ARM64_RELOC_SUBTRACTOR", - "ARM64_RELOC_TLVP_LOAD_PAGE21", - "ARM64_RELOC_TLVP_LOAD_PAGEOFF12", - "ARM64_RELOC_UNSIGNED", - "ARM_RELOC_BR24", - "ARM_RELOC_HALF", - "ARM_RELOC_HALF_SECTDIFF", - "ARM_RELOC_LOCAL_SECTDIFF", - "ARM_RELOC_PAIR", - "ARM_RELOC_PB_LA_PTR", - "ARM_RELOC_SECTDIFF", - "ARM_RELOC_VANILLA", - "ARM_THUMB_32BIT_BRANCH", - "ARM_THUMB_RELOC_BR22", - "Cpu", - "Cpu386", - "CpuAmd64", - "CpuArm", - "CpuArm64", - "CpuPpc", - "CpuPpc64", - "Dylib", - "DylibCmd", - "Dysymtab", - "DysymtabCmd", - "ErrNotFat", - "FatArch", - "FatArchHeader", - "FatFile", - "File", - "FileHeader", - "FlagAllModsBound", - "FlagAllowStackExecution", - "FlagAppExtensionSafe", - "FlagBindAtLoad", - "FlagBindsToWeak", - "FlagCanonical", - "FlagDeadStrippableDylib", - "FlagDyldLink", - "FlagForceFlat", - "FlagHasTLVDescriptors", - "FlagIncrLink", - "FlagLazyInit", - "FlagNoFixPrebinding", - "FlagNoHeapExecution", - "FlagNoMultiDefs", - "FlagNoReexportedDylibs", - "FlagNoUndefs", - "FlagPIE", - "FlagPrebindable", - "FlagPrebound", - "FlagRootSafe", - "FlagSetuidSafe", - "FlagSplitSegs", - "FlagSubsectionsViaSymbols", - "FlagTwoLevel", - "FlagWeakDefines", - "FormatError", - "GENERIC_RELOC_LOCAL_SECTDIFF", - "GENERIC_RELOC_PAIR", - "GENERIC_RELOC_PB_LA_PTR", - "GENERIC_RELOC_SECTDIFF", - "GENERIC_RELOC_TLV", - "GENERIC_RELOC_VANILLA", - "Load", - "LoadBytes", - "LoadCmd", - "LoadCmdDylib", - "LoadCmdDylinker", - "LoadCmdDysymtab", - "LoadCmdRpath", - "LoadCmdSegment", - "LoadCmdSegment64", - "LoadCmdSymtab", - "LoadCmdThread", - "LoadCmdUnixThread", - "Magic32", - "Magic64", - "MagicFat", - "NewFatFile", - "NewFile", - "Nlist32", - "Nlist64", - "Open", - "OpenFat", - "Regs386", - "RegsAMD64", - "Reloc", - "RelocTypeARM", - "RelocTypeARM64", - "RelocTypeGeneric", - "RelocTypeX86_64", - "Rpath", - "RpathCmd", - "Section", - "Section32", - "Section64", - "SectionHeader", - "Segment", - "Segment32", - "Segment64", - "SegmentHeader", - "Symbol", - "Symtab", - "SymtabCmd", - "Thread", - "Type", - "TypeBundle", - "TypeDylib", - "TypeExec", - "TypeObj", - "X86_64_RELOC_BRANCH", - "X86_64_RELOC_GOT", - "X86_64_RELOC_GOT_LOAD", - "X86_64_RELOC_SIGNED", - "X86_64_RELOC_SIGNED_1", - "X86_64_RELOC_SIGNED_2", - "X86_64_RELOC_SIGNED_4", - "X86_64_RELOC_SUBTRACTOR", - "X86_64_RELOC_TLV", - "X86_64_RELOC_UNSIGNED", - }, - "debug/pe": { - "COFFSymbol", - "COFFSymbolAuxFormat5", - "COFFSymbolSize", - "DataDirectory", - "File", - "FileHeader", - "FormatError", - "IMAGE_COMDAT_SELECT_ANY", - 
"IMAGE_COMDAT_SELECT_ASSOCIATIVE", - "IMAGE_COMDAT_SELECT_EXACT_MATCH", - "IMAGE_COMDAT_SELECT_LARGEST", - "IMAGE_COMDAT_SELECT_NODUPLICATES", - "IMAGE_COMDAT_SELECT_SAME_SIZE", - "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", - "IMAGE_DIRECTORY_ENTRY_BASERELOC", - "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", - "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", - "IMAGE_DIRECTORY_ENTRY_DEBUG", - "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", - "IMAGE_DIRECTORY_ENTRY_EXCEPTION", - "IMAGE_DIRECTORY_ENTRY_EXPORT", - "IMAGE_DIRECTORY_ENTRY_GLOBALPTR", - "IMAGE_DIRECTORY_ENTRY_IAT", - "IMAGE_DIRECTORY_ENTRY_IMPORT", - "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", - "IMAGE_DIRECTORY_ENTRY_RESOURCE", - "IMAGE_DIRECTORY_ENTRY_SECURITY", - "IMAGE_DIRECTORY_ENTRY_TLS", - "IMAGE_DLLCHARACTERISTICS_APPCONTAINER", - "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", - "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", - "IMAGE_DLLCHARACTERISTICS_GUARD_CF", - "IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", - "IMAGE_DLLCHARACTERISTICS_NO_BIND", - "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", - "IMAGE_DLLCHARACTERISTICS_NO_SEH", - "IMAGE_DLLCHARACTERISTICS_NX_COMPAT", - "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", - "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", - "IMAGE_FILE_32BIT_MACHINE", - "IMAGE_FILE_AGGRESIVE_WS_TRIM", - "IMAGE_FILE_BYTES_REVERSED_HI", - "IMAGE_FILE_BYTES_REVERSED_LO", - "IMAGE_FILE_DEBUG_STRIPPED", - "IMAGE_FILE_DLL", - "IMAGE_FILE_EXECUTABLE_IMAGE", - "IMAGE_FILE_LARGE_ADDRESS_AWARE", - "IMAGE_FILE_LINE_NUMS_STRIPPED", - "IMAGE_FILE_LOCAL_SYMS_STRIPPED", - "IMAGE_FILE_MACHINE_AM33", - "IMAGE_FILE_MACHINE_AMD64", - "IMAGE_FILE_MACHINE_ARM", - "IMAGE_FILE_MACHINE_ARM64", - "IMAGE_FILE_MACHINE_ARMNT", - "IMAGE_FILE_MACHINE_EBC", - "IMAGE_FILE_MACHINE_I386", - "IMAGE_FILE_MACHINE_IA64", - "IMAGE_FILE_MACHINE_LOONGARCH32", - "IMAGE_FILE_MACHINE_LOONGARCH64", - "IMAGE_FILE_MACHINE_M32R", - "IMAGE_FILE_MACHINE_MIPS16", - "IMAGE_FILE_MACHINE_MIPSFPU", - "IMAGE_FILE_MACHINE_MIPSFPU16", - "IMAGE_FILE_MACHINE_POWERPC", - "IMAGE_FILE_MACHINE_POWERPCFP", - "IMAGE_FILE_MACHINE_R4000", - "IMAGE_FILE_MACHINE_RISCV128", - "IMAGE_FILE_MACHINE_RISCV32", - "IMAGE_FILE_MACHINE_RISCV64", - "IMAGE_FILE_MACHINE_SH3", - "IMAGE_FILE_MACHINE_SH3DSP", - "IMAGE_FILE_MACHINE_SH4", - "IMAGE_FILE_MACHINE_SH5", - "IMAGE_FILE_MACHINE_THUMB", - "IMAGE_FILE_MACHINE_UNKNOWN", - "IMAGE_FILE_MACHINE_WCEMIPSV2", - "IMAGE_FILE_NET_RUN_FROM_SWAP", - "IMAGE_FILE_RELOCS_STRIPPED", - "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", - "IMAGE_FILE_SYSTEM", - "IMAGE_FILE_UP_SYSTEM_ONLY", - "IMAGE_SCN_CNT_CODE", - "IMAGE_SCN_CNT_INITIALIZED_DATA", - "IMAGE_SCN_CNT_UNINITIALIZED_DATA", - "IMAGE_SCN_LNK_COMDAT", - "IMAGE_SCN_MEM_DISCARDABLE", - "IMAGE_SCN_MEM_EXECUTE", - "IMAGE_SCN_MEM_READ", - "IMAGE_SCN_MEM_WRITE", - "IMAGE_SUBSYSTEM_EFI_APPLICATION", - "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM", - "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", - "IMAGE_SUBSYSTEM_NATIVE", - "IMAGE_SUBSYSTEM_NATIVE_WINDOWS", - "IMAGE_SUBSYSTEM_OS2_CUI", - "IMAGE_SUBSYSTEM_POSIX_CUI", - "IMAGE_SUBSYSTEM_UNKNOWN", - "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", - "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", - "IMAGE_SUBSYSTEM_WINDOWS_CUI", - "IMAGE_SUBSYSTEM_WINDOWS_GUI", - "IMAGE_SUBSYSTEM_XBOX", - "ImportDirectory", - "NewFile", - "Open", - "OptionalHeader32", - "OptionalHeader64", - "Reloc", - "Section", - "SectionHeader", - "SectionHeader32", - "StringTable", - "Symbol", - }, - "debug/plan9obj": { - "ErrNoSymbols", - "File", - "FileHeader", - "Magic386", - "Magic64", - "MagicAMD64", - "MagicARM", - "NewFile", - "Open", - 
"Section", - "SectionHeader", - "Sym", - }, - "embed": { - "FS", - }, - "encoding": { - "BinaryMarshaler", - "BinaryUnmarshaler", - "TextMarshaler", - "TextUnmarshaler", - }, - "encoding/ascii85": { - "CorruptInputError", - "Decode", - "Encode", - "MaxEncodedLen", - "NewDecoder", - "NewEncoder", - }, - "encoding/asn1": { - "BitString", - "ClassApplication", - "ClassContextSpecific", - "ClassPrivate", - "ClassUniversal", - "Enumerated", - "Flag", - "Marshal", - "MarshalWithParams", - "NullBytes", - "NullRawValue", - "ObjectIdentifier", - "RawContent", - "RawValue", - "StructuralError", - "SyntaxError", - "TagBMPString", - "TagBitString", - "TagBoolean", - "TagEnum", - "TagGeneralString", - "TagGeneralizedTime", - "TagIA5String", - "TagInteger", - "TagNull", - "TagNumericString", - "TagOID", - "TagOctetString", - "TagPrintableString", - "TagSequence", - "TagSet", - "TagT61String", - "TagUTCTime", - "TagUTF8String", - "Unmarshal", - "UnmarshalWithParams", - }, - "encoding/base32": { - "CorruptInputError", - "Encoding", - "HexEncoding", - "NewDecoder", - "NewEncoder", - "NewEncoding", - "NoPadding", - "StdEncoding", - "StdPadding", - }, - "encoding/base64": { - "CorruptInputError", - "Encoding", - "NewDecoder", - "NewEncoder", - "NewEncoding", - "NoPadding", - "RawStdEncoding", - "RawURLEncoding", - "StdEncoding", - "StdPadding", - "URLEncoding", - }, - "encoding/binary": { - "AppendByteOrder", - "AppendUvarint", - "AppendVarint", - "BigEndian", - "ByteOrder", - "LittleEndian", - "MaxVarintLen16", - "MaxVarintLen32", - "MaxVarintLen64", - "NativeEndian", - "PutUvarint", - "PutVarint", - "Read", - "ReadUvarint", - "ReadVarint", - "Size", - "Uvarint", - "Varint", - "Write", - }, - "encoding/csv": { - "ErrBareQuote", - "ErrFieldCount", - "ErrQuote", - "ErrTrailingComma", - "NewReader", - "NewWriter", - "ParseError", - "Reader", - "Writer", - }, - "encoding/gob": { - "CommonType", - "Decoder", - "Encoder", - "GobDecoder", - "GobEncoder", - "NewDecoder", - "NewEncoder", - "Register", - "RegisterName", - }, - "encoding/hex": { - "Decode", - "DecodeString", - "DecodedLen", - "Dump", - "Dumper", - "Encode", - "EncodeToString", - "EncodedLen", - "ErrLength", - "InvalidByteError", - "NewDecoder", - "NewEncoder", - }, - "encoding/json": { - "Compact", - "Decoder", - "Delim", - "Encoder", - "HTMLEscape", - "Indent", - "InvalidUTF8Error", - "InvalidUnmarshalError", - "Marshal", - "MarshalIndent", - "Marshaler", - "MarshalerError", - "NewDecoder", - "NewEncoder", - "Number", - "RawMessage", - "SyntaxError", - "Token", - "Unmarshal", - "UnmarshalFieldError", - "UnmarshalTypeError", - "Unmarshaler", - "UnsupportedTypeError", - "UnsupportedValueError", - "Valid", - }, - "encoding/pem": { - "Block", - "Decode", - "Encode", - "EncodeToMemory", - }, - "encoding/xml": { - "Attr", - "CharData", - "Comment", - "CopyToken", - "Decoder", - "Directive", - "Encoder", - "EndElement", - "Escape", - "EscapeText", - "HTMLAutoClose", - "HTMLEntity", - "Header", - "Marshal", - "MarshalIndent", - "Marshaler", - "MarshalerAttr", - "Name", - "NewDecoder", - "NewEncoder", - "NewTokenDecoder", - "ProcInst", - "StartElement", - "SyntaxError", - "TagPathError", - "Token", - "TokenReader", - "Unmarshal", - "UnmarshalError", - "Unmarshaler", - "UnmarshalerAttr", - "UnsupportedTypeError", - }, - "errors": { - "As", - "ErrUnsupported", - "Is", - "Join", - "New", - "Unwrap", - }, - "expvar": { - "Do", - "Float", - "Func", - "Get", - "Handler", - "Int", - "KeyValue", - "Map", - "NewFloat", - "NewInt", - "NewMap", - "NewString", - 
"Publish", - "String", - "Var", - }, - "flag": { - "Arg", - "Args", - "Bool", - "BoolFunc", - "BoolVar", - "CommandLine", - "ContinueOnError", - "Duration", - "DurationVar", - "ErrHelp", - "ErrorHandling", - "ExitOnError", - "Flag", - "FlagSet", - "Float64", - "Float64Var", - "Func", - "Getter", - "Int", - "Int64", - "Int64Var", - "IntVar", - "Lookup", - "NArg", - "NFlag", - "NewFlagSet", - "PanicOnError", - "Parse", - "Parsed", - "PrintDefaults", - "Set", - "String", - "StringVar", - "TextVar", - "Uint", - "Uint64", - "Uint64Var", - "UintVar", - "UnquoteUsage", - "Usage", - "Value", - "Var", - "Visit", - "VisitAll", - }, - "fmt": { - "Append", - "Appendf", - "Appendln", - "Errorf", - "FormatString", - "Formatter", - "Fprint", - "Fprintf", - "Fprintln", - "Fscan", - "Fscanf", - "Fscanln", - "GoStringer", - "Print", - "Printf", - "Println", - "Scan", - "ScanState", - "Scanf", - "Scanln", - "Scanner", - "Sprint", - "Sprintf", - "Sprintln", - "Sscan", - "Sscanf", - "Sscanln", - "State", - "Stringer", - }, - "go/ast": { - "ArrayType", - "AssignStmt", - "Bad", - "BadDecl", - "BadExpr", - "BadStmt", - "BasicLit", - "BinaryExpr", - "BlockStmt", - "BranchStmt", - "CallExpr", - "CaseClause", - "ChanDir", - "ChanType", - "CommClause", - "Comment", - "CommentGroup", - "CommentMap", - "CompositeLit", - "Con", - "Decl", - "DeclStmt", - "DeferStmt", - "Ellipsis", - "EmptyStmt", - "Expr", - "ExprStmt", - "Field", - "FieldFilter", - "FieldList", - "File", - "FileExports", - "Filter", - "FilterDecl", - "FilterFile", - "FilterFuncDuplicates", - "FilterImportDuplicates", - "FilterPackage", - "FilterUnassociatedComments", - "ForStmt", - "Fprint", - "Fun", - "FuncDecl", - "FuncLit", - "FuncType", - "GenDecl", - "GoStmt", - "Ident", - "IfStmt", - "ImportSpec", - "Importer", - "IncDecStmt", - "IndexExpr", - "IndexListExpr", - "Inspect", - "InterfaceType", - "IsExported", - "IsGenerated", - "KeyValueExpr", - "LabeledStmt", - "Lbl", - "MapType", - "MergeMode", - "MergePackageFiles", - "NewCommentMap", - "NewIdent", - "NewObj", - "NewPackage", - "NewScope", - "Node", - "NotNilFilter", - "ObjKind", - "Object", - "Package", - "PackageExports", - "ParenExpr", - "Pkg", - "Print", - "RECV", - "RangeStmt", - "ReturnStmt", - "SEND", - "Scope", - "SelectStmt", - "SelectorExpr", - "SendStmt", - "SliceExpr", - "SortImports", - "Spec", - "StarExpr", - "Stmt", - "StructType", - "SwitchStmt", - "Typ", - "TypeAssertExpr", - "TypeSpec", - "TypeSwitchStmt", - "UnaryExpr", - "ValueSpec", - "Var", - "Visitor", - "Walk", - }, - "go/build": { - "AllowBinary", - "ArchChar", - "Context", - "Default", - "Directive", - "FindOnly", - "IgnoreVendor", - "Import", - "ImportComment", - "ImportDir", - "ImportMode", - "IsLocalImport", - "MultiplePackageError", - "NoGoError", - "Package", - "ToolDir", - }, - "go/build/constraint": { - "AndExpr", - "Expr", - "GoVersion", - "IsGoBuild", - "IsPlusBuild", - "NotExpr", - "OrExpr", - "Parse", - "PlusBuildLines", - "SyntaxError", - "TagExpr", - }, - "go/constant": { - "BinaryOp", - "BitLen", - "Bool", - "BoolVal", - "Bytes", - "Compare", - "Complex", - "Denom", - "Float", - "Float32Val", - "Float64Val", - "Imag", - "Int", - "Int64Val", - "Kind", - "Make", - "MakeBool", - "MakeFloat64", - "MakeFromBytes", - "MakeFromLiteral", - "MakeImag", - "MakeInt64", - "MakeString", - "MakeUint64", - "MakeUnknown", - "Num", - "Real", - "Shift", - "Sign", - "String", - "StringVal", - "ToComplex", - "ToFloat", - "ToInt", - "Uint64Val", - "UnaryOp", - "Unknown", - "Val", - "Value", - }, - "go/doc": { - "AllDecls", - 
"AllMethods", - "Example", - "Examples", - "Filter", - "Func", - "IllegalPrefixes", - "IsPredeclared", - "Mode", - "New", - "NewFromFiles", - "Note", - "Package", - "PreserveAST", - "Synopsis", - "ToHTML", - "ToText", - "Type", - "Value", - }, - "go/doc/comment": { - "Block", - "Code", - "DefaultLookupPackage", - "Doc", - "DocLink", - "Heading", - "Italic", - "Link", - "LinkDef", - "List", - "ListItem", - "Paragraph", - "Parser", - "Plain", - "Printer", - "Text", - }, - "go/format": { - "Node", - "Source", - }, - "go/importer": { - "Default", - "For", - "ForCompiler", - "Lookup", - }, - "go/parser": { - "AllErrors", - "DeclarationErrors", - "ImportsOnly", - "Mode", - "PackageClauseOnly", - "ParseComments", - "ParseDir", - "ParseExpr", - "ParseExprFrom", - "ParseFile", - "SkipObjectResolution", - "SpuriousErrors", - "Trace", - }, - "go/printer": { - "CommentedNode", - "Config", - "Fprint", - "Mode", - "RawFormat", - "SourcePos", - "TabIndent", - "UseSpaces", - }, - "go/scanner": { - "Error", - "ErrorHandler", - "ErrorList", - "Mode", - "PrintError", - "ScanComments", - "Scanner", - }, - "go/token": { - "ADD", - "ADD_ASSIGN", - "AND", - "AND_ASSIGN", - "AND_NOT", - "AND_NOT_ASSIGN", - "ARROW", - "ASSIGN", - "BREAK", - "CASE", - "CHAN", - "CHAR", - "COLON", - "COMMA", - "COMMENT", - "CONST", - "CONTINUE", - "DEC", - "DEFAULT", - "DEFER", - "DEFINE", - "ELLIPSIS", - "ELSE", - "EOF", - "EQL", - "FALLTHROUGH", - "FLOAT", - "FOR", - "FUNC", - "File", - "FileSet", - "GEQ", - "GO", - "GOTO", - "GTR", - "HighestPrec", - "IDENT", - "IF", - "ILLEGAL", - "IMAG", - "IMPORT", - "INC", - "INT", - "INTERFACE", - "IsExported", - "IsIdentifier", - "IsKeyword", - "LAND", - "LBRACE", - "LBRACK", - "LEQ", - "LOR", - "LPAREN", - "LSS", - "Lookup", - "LowestPrec", - "MAP", - "MUL", - "MUL_ASSIGN", - "NEQ", - "NOT", - "NewFileSet", - "NoPos", - "OR", - "OR_ASSIGN", - "PACKAGE", - "PERIOD", - "Pos", - "Position", - "QUO", - "QUO_ASSIGN", - "RANGE", - "RBRACE", - "RBRACK", - "REM", - "REM_ASSIGN", - "RETURN", - "RPAREN", - "SELECT", - "SEMICOLON", - "SHL", - "SHL_ASSIGN", - "SHR", - "SHR_ASSIGN", - "STRING", - "STRUCT", - "SUB", - "SUB_ASSIGN", - "SWITCH", - "TILDE", - "TYPE", - "Token", - "UnaryPrec", - "VAR", - "XOR", - "XOR_ASSIGN", - }, - "go/types": { - "ArgumentError", - "Array", - "AssertableTo", - "AssignableTo", - "Basic", - "BasicInfo", - "BasicKind", - "Bool", - "Builtin", - "Byte", - "Chan", - "ChanDir", - "CheckExpr", - "Checker", - "Comparable", - "Complex128", - "Complex64", - "Config", - "Const", - "Context", - "ConvertibleTo", - "DefPredeclaredTestFuncs", - "Default", - "Error", - "Eval", - "ExprString", - "FieldVal", - "Float32", - "Float64", - "Func", - "Id", - "Identical", - "IdenticalIgnoreTags", - "Implements", - "ImportMode", - "Importer", - "ImporterFrom", - "Info", - "Initializer", - "Instance", - "Instantiate", - "Int", - "Int16", - "Int32", - "Int64", - "Int8", - "Interface", - "Invalid", - "IsBoolean", - "IsComplex", - "IsConstType", - "IsFloat", - "IsInteger", - "IsInterface", - "IsNumeric", - "IsOrdered", - "IsString", - "IsUnsigned", - "IsUntyped", - "Label", - "LookupFieldOrMethod", - "Map", - "MethodExpr", - "MethodSet", - "MethodVal", - "MissingMethod", - "Named", - "NewArray", - "NewChan", - "NewChecker", - "NewConst", - "NewContext", - "NewField", - "NewFunc", - "NewInterface", - "NewInterfaceType", - "NewLabel", - "NewMap", - "NewMethodSet", - "NewNamed", - "NewPackage", - "NewParam", - "NewPkgName", - "NewPointer", - "NewScope", - "NewSignature", - "NewSignatureType", - 
"NewSlice", - "NewStruct", - "NewTerm", - "NewTuple", - "NewTypeName", - "NewTypeParam", - "NewUnion", - "NewVar", - "Nil", - "Object", - "ObjectString", - "Package", - "PkgName", - "Pointer", - "Qualifier", - "RecvOnly", - "RelativeTo", - "Rune", - "Satisfies", - "Scope", - "Selection", - "SelectionKind", - "SelectionString", - "SendOnly", - "SendRecv", - "Signature", - "Sizes", - "SizesFor", - "Slice", - "StdSizes", - "String", - "Struct", - "Term", - "Tuple", - "Typ", - "Type", - "TypeAndValue", - "TypeList", - "TypeName", - "TypeParam", - "TypeParamList", - "TypeString", - "Uint", - "Uint16", - "Uint32", - "Uint64", - "Uint8", - "Uintptr", - "Union", - "Universe", - "Unsafe", - "UnsafePointer", - "UntypedBool", - "UntypedComplex", - "UntypedFloat", - "UntypedInt", - "UntypedNil", - "UntypedRune", - "UntypedString", - "Var", - "WriteExpr", - "WriteSignature", - "WriteType", - }, - "hash": { - "Hash", - "Hash32", - "Hash64", - }, - "hash/adler32": { - "Checksum", - "New", - "Size", - }, - "hash/crc32": { - "Castagnoli", - "Checksum", - "ChecksumIEEE", - "IEEE", - "IEEETable", - "Koopman", - "MakeTable", - "New", - "NewIEEE", - "Size", - "Table", - "Update", - }, - "hash/crc64": { - "Checksum", - "ECMA", - "ISO", - "MakeTable", - "New", - "Size", - "Table", - "Update", - }, - "hash/fnv": { - "New128", - "New128a", - "New32", - "New32a", - "New64", - "New64a", - }, - "hash/maphash": { - "Bytes", - "Hash", - "MakeSeed", - "Seed", - "String", - }, - "html": { - "EscapeString", - "UnescapeString", - }, - "html/template": { - "CSS", - "ErrAmbigContext", - "ErrBadHTML", - "ErrBranchEnd", - "ErrEndContext", - "ErrJSTemplate", - "ErrNoSuchTemplate", - "ErrOutputContext", - "ErrPartialCharset", - "ErrPartialEscape", - "ErrPredefinedEscaper", - "ErrRangeLoopReentry", - "ErrSlashAmbig", - "Error", - "ErrorCode", - "FuncMap", - "HTML", - "HTMLAttr", - "HTMLEscape", - "HTMLEscapeString", - "HTMLEscaper", - "IsTrue", - "JS", - "JSEscape", - "JSEscapeString", - "JSEscaper", - "JSStr", - "Must", - "New", - "OK", - "ParseFS", - "ParseFiles", - "ParseGlob", - "Srcset", - "Template", - "URL", - "URLQueryEscaper", - }, - "image": { - "Alpha", - "Alpha16", - "Black", - "CMYK", - "Config", - "Decode", - "DecodeConfig", - "ErrFormat", - "Gray", - "Gray16", - "Image", - "NRGBA", - "NRGBA64", - "NYCbCrA", - "NewAlpha", - "NewAlpha16", - "NewCMYK", - "NewGray", - "NewGray16", - "NewNRGBA", - "NewNRGBA64", - "NewNYCbCrA", - "NewPaletted", - "NewRGBA", - "NewRGBA64", - "NewUniform", - "NewYCbCr", - "Opaque", - "Paletted", - "PalettedImage", - "Point", - "Pt", - "RGBA", - "RGBA64", - "RGBA64Image", - "Rect", - "Rectangle", - "RegisterFormat", - "Transparent", - "Uniform", - "White", - "YCbCr", - "YCbCrSubsampleRatio", - "YCbCrSubsampleRatio410", - "YCbCrSubsampleRatio411", - "YCbCrSubsampleRatio420", - "YCbCrSubsampleRatio422", - "YCbCrSubsampleRatio440", - "YCbCrSubsampleRatio444", - "ZP", - "ZR", - }, - "image/color": { - "Alpha", - "Alpha16", - "Alpha16Model", - "AlphaModel", - "Black", - "CMYK", - "CMYKModel", - "CMYKToRGB", - "Color", - "Gray", - "Gray16", - "Gray16Model", - "GrayModel", - "Model", - "ModelFunc", - "NRGBA", - "NRGBA64", - "NRGBA64Model", - "NRGBAModel", - "NYCbCrA", - "NYCbCrAModel", - "Opaque", - "Palette", - "RGBA", - "RGBA64", - "RGBA64Model", - "RGBAModel", - "RGBToCMYK", - "RGBToYCbCr", - "Transparent", - "White", - "YCbCr", - "YCbCrModel", - "YCbCrToRGB", - }, - "image/color/palette": { - "Plan9", - "WebSafe", - }, - "image/draw": { - "Draw", - "DrawMask", - "Drawer", - "FloydSteinberg", - 
"Image", - "Op", - "Over", - "Quantizer", - "RGBA64Image", - "Src", - }, - "image/gif": { - "Decode", - "DecodeAll", - "DecodeConfig", - "DisposalBackground", - "DisposalNone", - "DisposalPrevious", - "Encode", - "EncodeAll", - "GIF", - "Options", - }, - "image/jpeg": { - "Decode", - "DecodeConfig", - "DefaultQuality", - "Encode", - "FormatError", - "Options", - "Reader", - "UnsupportedError", - }, - "image/png": { - "BestCompression", - "BestSpeed", - "CompressionLevel", - "Decode", - "DecodeConfig", - "DefaultCompression", - "Encode", - "Encoder", - "EncoderBuffer", - "EncoderBufferPool", - "FormatError", - "NoCompression", - "UnsupportedError", - }, - "index/suffixarray": { - "Index", - "New", - }, - "io": { - "ByteReader", - "ByteScanner", - "ByteWriter", - "Closer", - "Copy", - "CopyBuffer", - "CopyN", - "Discard", - "EOF", - "ErrClosedPipe", - "ErrNoProgress", - "ErrShortBuffer", - "ErrShortWrite", - "ErrUnexpectedEOF", - "LimitReader", - "LimitedReader", - "MultiReader", - "MultiWriter", - "NewOffsetWriter", - "NewSectionReader", - "NopCloser", - "OffsetWriter", - "Pipe", - "PipeReader", - "PipeWriter", - "ReadAll", - "ReadAtLeast", - "ReadCloser", - "ReadFull", - "ReadSeekCloser", - "ReadSeeker", - "ReadWriteCloser", - "ReadWriteSeeker", - "ReadWriter", - "Reader", - "ReaderAt", - "ReaderFrom", - "RuneReader", - "RuneScanner", - "SectionReader", - "SeekCurrent", - "SeekEnd", - "SeekStart", - "Seeker", - "StringWriter", - "TeeReader", - "WriteCloser", - "WriteSeeker", - "WriteString", - "Writer", - "WriterAt", - "WriterTo", - }, - "io/fs": { - "DirEntry", - "ErrClosed", - "ErrExist", - "ErrInvalid", - "ErrNotExist", - "ErrPermission", - "FS", - "File", - "FileInfo", - "FileInfoToDirEntry", - "FileMode", - "FormatDirEntry", - "FormatFileInfo", - "Glob", - "GlobFS", - "ModeAppend", - "ModeCharDevice", - "ModeDevice", - "ModeDir", - "ModeExclusive", - "ModeIrregular", - "ModeNamedPipe", - "ModePerm", - "ModeSetgid", - "ModeSetuid", - "ModeSocket", - "ModeSticky", - "ModeSymlink", - "ModeTemporary", - "ModeType", - "PathError", - "ReadDir", - "ReadDirFS", - "ReadDirFile", - "ReadFile", - "ReadFileFS", - "SkipAll", - "SkipDir", - "Stat", - "StatFS", - "Sub", - "SubFS", - "ValidPath", - "WalkDir", - "WalkDirFunc", - }, - "io/ioutil": { - "Discard", - "NopCloser", - "ReadAll", - "ReadDir", - "ReadFile", - "TempDir", - "TempFile", - "WriteFile", - }, - "log": { - "Default", - "Fatal", - "Fatalf", - "Fatalln", - "Flags", - "LUTC", - "Ldate", - "Llongfile", - "Lmicroseconds", - "Lmsgprefix", - "Logger", - "Lshortfile", - "LstdFlags", - "Ltime", - "New", - "Output", - "Panic", - "Panicf", - "Panicln", - "Prefix", - "Print", - "Printf", - "Println", - "SetFlags", - "SetOutput", - "SetPrefix", - "Writer", - }, - "log/slog": { - "Any", - "AnyValue", - "Attr", - "Bool", - "BoolValue", - "Debug", - "DebugContext", - "Default", - "Duration", - "DurationValue", - "Error", - "ErrorContext", - "Float64", - "Float64Value", - "Group", - "GroupValue", - "Handler", - "HandlerOptions", - "Info", - "InfoContext", - "Int", - "Int64", - "Int64Value", - "IntValue", - "JSONHandler", - "Kind", - "KindAny", - "KindBool", - "KindDuration", - "KindFloat64", - "KindGroup", - "KindInt64", - "KindLogValuer", - "KindString", - "KindTime", - "KindUint64", - "Level", - "LevelDebug", - "LevelError", - "LevelInfo", - "LevelKey", - "LevelVar", - "LevelWarn", - "Leveler", - "Log", - "LogAttrs", - "LogValuer", - "Logger", - "MessageKey", - "New", - "NewJSONHandler", - "NewLogLogger", - "NewRecord", - "NewTextHandler", - 
"Record", - "SetDefault", - "Source", - "SourceKey", - "String", - "StringValue", - "TextHandler", - "Time", - "TimeKey", - "TimeValue", - "Uint64", - "Uint64Value", - "Value", - "Warn", - "WarnContext", - "With", - }, - "log/syslog": { - "Dial", - "LOG_ALERT", - "LOG_AUTH", - "LOG_AUTHPRIV", - "LOG_CRIT", - "LOG_CRON", - "LOG_DAEMON", - "LOG_DEBUG", - "LOG_EMERG", - "LOG_ERR", - "LOG_FTP", - "LOG_INFO", - "LOG_KERN", - "LOG_LOCAL0", - "LOG_LOCAL1", - "LOG_LOCAL2", - "LOG_LOCAL3", - "LOG_LOCAL4", - "LOG_LOCAL5", - "LOG_LOCAL6", - "LOG_LOCAL7", - "LOG_LPR", - "LOG_MAIL", - "LOG_NEWS", - "LOG_NOTICE", - "LOG_SYSLOG", - "LOG_USER", - "LOG_UUCP", - "LOG_WARNING", - "New", - "NewLogger", - "Priority", - "Writer", - }, - "maps": { - "Clone", - "Copy", - "DeleteFunc", - "Equal", - "EqualFunc", - }, - "math": { - "Abs", - "Acos", - "Acosh", - "Asin", - "Asinh", - "Atan", - "Atan2", - "Atanh", - "Cbrt", - "Ceil", - "Copysign", - "Cos", - "Cosh", - "Dim", - "E", - "Erf", - "Erfc", - "Erfcinv", - "Erfinv", - "Exp", - "Exp2", - "Expm1", - "FMA", - "Float32bits", - "Float32frombits", - "Float64bits", - "Float64frombits", - "Floor", - "Frexp", - "Gamma", - "Hypot", - "Ilogb", - "Inf", - "IsInf", - "IsNaN", - "J0", - "J1", - "Jn", - "Ldexp", - "Lgamma", - "Ln10", - "Ln2", - "Log", - "Log10", - "Log10E", - "Log1p", - "Log2", - "Log2E", - "Logb", - "Max", - "MaxFloat32", - "MaxFloat64", - "MaxInt", - "MaxInt16", - "MaxInt32", - "MaxInt64", - "MaxInt8", - "MaxUint", - "MaxUint16", - "MaxUint32", - "MaxUint64", - "MaxUint8", - "Min", - "MinInt", - "MinInt16", - "MinInt32", - "MinInt64", - "MinInt8", - "Mod", - "Modf", - "NaN", - "Nextafter", - "Nextafter32", - "Phi", - "Pi", - "Pow", - "Pow10", - "Remainder", - "Round", - "RoundToEven", - "Signbit", - "Sin", - "Sincos", - "Sinh", - "SmallestNonzeroFloat32", - "SmallestNonzeroFloat64", - "Sqrt", - "Sqrt2", - "SqrtE", - "SqrtPhi", - "SqrtPi", - "Tan", - "Tanh", - "Trunc", - "Y0", - "Y1", - "Yn", - }, - "math/big": { - "Above", - "Accuracy", - "AwayFromZero", - "Below", - "ErrNaN", - "Exact", - "Float", - "Int", - "Jacobi", - "MaxBase", - "MaxExp", - "MaxPrec", - "MinExp", - "NewFloat", - "NewInt", - "NewRat", - "ParseFloat", - "Rat", - "RoundingMode", - "ToNearestAway", - "ToNearestEven", - "ToNegativeInf", - "ToPositiveInf", - "ToZero", - "Word", - }, - "math/bits": { - "Add", - "Add32", - "Add64", - "Div", - "Div32", - "Div64", - "LeadingZeros", - "LeadingZeros16", - "LeadingZeros32", - "LeadingZeros64", - "LeadingZeros8", - "Len", - "Len16", - "Len32", - "Len64", - "Len8", - "Mul", - "Mul32", - "Mul64", - "OnesCount", - "OnesCount16", - "OnesCount32", - "OnesCount64", - "OnesCount8", - "Rem", - "Rem32", - "Rem64", - "Reverse", - "Reverse16", - "Reverse32", - "Reverse64", - "Reverse8", - "ReverseBytes", - "ReverseBytes16", - "ReverseBytes32", - "ReverseBytes64", - "RotateLeft", - "RotateLeft16", - "RotateLeft32", - "RotateLeft64", - "RotateLeft8", - "Sub", - "Sub32", - "Sub64", - "TrailingZeros", - "TrailingZeros16", - "TrailingZeros32", - "TrailingZeros64", - "TrailingZeros8", - "UintSize", - }, - "math/cmplx": { - "Abs", - "Acos", - "Acosh", - "Asin", - "Asinh", - "Atan", - "Atanh", - "Conj", - "Cos", - "Cosh", - "Cot", - "Exp", - "Inf", - "IsInf", - "IsNaN", - "Log", - "Log10", - "NaN", - "Phase", - "Polar", - "Pow", - "Rect", - "Sin", - "Sinh", - "Sqrt", - "Tan", - "Tanh", - }, - "math/rand": { - "ExpFloat64", - "Float32", - "Float64", - "Int", - "Int31", - "Int31n", - "Int63", - "Int63n", - "Intn", - "New", - "NewSource", - "NewZipf", - "NormFloat64", - 
"Perm", - "Rand", - "Read", - "Seed", - "Shuffle", - "Source", - "Source64", - "Uint32", - "Uint64", - "Zipf", - }, - "mime": { - "AddExtensionType", - "BEncoding", - "ErrInvalidMediaParameter", - "ExtensionsByType", - "FormatMediaType", - "ParseMediaType", - "QEncoding", - "TypeByExtension", - "WordDecoder", - "WordEncoder", - }, - "mime/multipart": { - "ErrMessageTooLarge", - "File", - "FileHeader", - "Form", - "NewReader", - "NewWriter", - "Part", - "Reader", - "Writer", - }, - "mime/quotedprintable": { - "NewReader", - "NewWriter", - "Reader", - "Writer", - }, - "net": { - "Addr", - "AddrError", - "Buffers", - "CIDRMask", - "Conn", - "DNSConfigError", - "DNSError", - "DefaultResolver", - "Dial", - "DialIP", - "DialTCP", - "DialTimeout", - "DialUDP", - "DialUnix", - "Dialer", - "ErrClosed", - "ErrWriteToConnected", - "Error", - "FileConn", - "FileListener", - "FilePacketConn", - "FlagBroadcast", - "FlagLoopback", - "FlagMulticast", - "FlagPointToPoint", - "FlagRunning", - "FlagUp", - "Flags", - "HardwareAddr", - "IP", - "IPAddr", - "IPConn", - "IPMask", - "IPNet", - "IPv4", - "IPv4Mask", - "IPv4allrouter", - "IPv4allsys", - "IPv4bcast", - "IPv4len", - "IPv4zero", - "IPv6interfacelocalallnodes", - "IPv6len", - "IPv6linklocalallnodes", - "IPv6linklocalallrouters", - "IPv6loopback", - "IPv6unspecified", - "IPv6zero", - "Interface", - "InterfaceAddrs", - "InterfaceByIndex", - "InterfaceByName", - "Interfaces", - "InvalidAddrError", - "JoinHostPort", - "Listen", - "ListenConfig", - "ListenIP", - "ListenMulticastUDP", - "ListenPacket", - "ListenTCP", - "ListenUDP", - "ListenUnix", - "ListenUnixgram", - "Listener", - "LookupAddr", - "LookupCNAME", - "LookupHost", - "LookupIP", - "LookupMX", - "LookupNS", - "LookupPort", - "LookupSRV", - "LookupTXT", - "MX", - "NS", - "OpError", - "PacketConn", - "ParseCIDR", - "ParseError", - "ParseIP", - "ParseMAC", - "Pipe", - "ResolveIPAddr", - "ResolveTCPAddr", - "ResolveUDPAddr", - "ResolveUnixAddr", - "Resolver", - "SRV", - "SplitHostPort", - "TCPAddr", - "TCPAddrFromAddrPort", - "TCPConn", - "TCPListener", - "UDPAddr", - "UDPAddrFromAddrPort", - "UDPConn", - "UnixAddr", - "UnixConn", - "UnixListener", - "UnknownNetworkError", - }, - "net/http": { - "AllowQuerySemicolons", - "CanonicalHeaderKey", - "Client", - "CloseNotifier", - "ConnState", - "Cookie", - "CookieJar", - "DefaultClient", - "DefaultMaxHeaderBytes", - "DefaultMaxIdleConnsPerHost", - "DefaultServeMux", - "DefaultTransport", - "DetectContentType", - "Dir", - "ErrAbortHandler", - "ErrBodyNotAllowed", - "ErrBodyReadAfterClose", - "ErrContentLength", - "ErrHandlerTimeout", - "ErrHeaderTooLong", - "ErrHijacked", - "ErrLineTooLong", - "ErrMissingBoundary", - "ErrMissingContentLength", - "ErrMissingFile", - "ErrNoCookie", - "ErrNoLocation", - "ErrNotMultipart", - "ErrNotSupported", - "ErrSchemeMismatch", - "ErrServerClosed", - "ErrShortBody", - "ErrSkipAltProtocol", - "ErrUnexpectedTrailer", - "ErrUseLastResponse", - "ErrWriteAfterFlush", - "Error", - "FS", - "File", - "FileServer", - "FileSystem", - "Flusher", - "Get", - "Handle", - "HandleFunc", - "Handler", - "HandlerFunc", - "Head", - "Header", - "Hijacker", - "ListenAndServe", - "ListenAndServeTLS", - "LocalAddrContextKey", - "MaxBytesError", - "MaxBytesHandler", - "MaxBytesReader", - "MethodConnect", - "MethodDelete", - "MethodGet", - "MethodHead", - "MethodOptions", - "MethodPatch", - "MethodPost", - "MethodPut", - "MethodTrace", - "NewFileTransport", - "NewRequest", - "NewRequestWithContext", - "NewResponseController", - "NewServeMux", - 
"NoBody", - "NotFound", - "NotFoundHandler", - "ParseHTTPVersion", - "ParseTime", - "Post", - "PostForm", - "ProtocolError", - "ProxyFromEnvironment", - "ProxyURL", - "PushOptions", - "Pusher", - "ReadRequest", - "ReadResponse", - "Redirect", - "RedirectHandler", - "Request", - "Response", - "ResponseController", - "ResponseWriter", - "RoundTripper", - "SameSite", - "SameSiteDefaultMode", - "SameSiteLaxMode", - "SameSiteNoneMode", - "SameSiteStrictMode", - "Serve", - "ServeContent", - "ServeFile", - "ServeMux", - "ServeTLS", - "Server", - "ServerContextKey", - "SetCookie", - "StateActive", - "StateClosed", - "StateHijacked", - "StateIdle", - "StateNew", - "StatusAccepted", - "StatusAlreadyReported", - "StatusBadGateway", - "StatusBadRequest", - "StatusConflict", - "StatusContinue", - "StatusCreated", - "StatusEarlyHints", - "StatusExpectationFailed", - "StatusFailedDependency", - "StatusForbidden", - "StatusFound", - "StatusGatewayTimeout", - "StatusGone", - "StatusHTTPVersionNotSupported", - "StatusIMUsed", - "StatusInsufficientStorage", - "StatusInternalServerError", - "StatusLengthRequired", - "StatusLocked", - "StatusLoopDetected", - "StatusMethodNotAllowed", - "StatusMisdirectedRequest", - "StatusMovedPermanently", - "StatusMultiStatus", - "StatusMultipleChoices", - "StatusNetworkAuthenticationRequired", - "StatusNoContent", - "StatusNonAuthoritativeInfo", - "StatusNotAcceptable", - "StatusNotExtended", - "StatusNotFound", - "StatusNotImplemented", - "StatusNotModified", - "StatusOK", - "StatusPartialContent", - "StatusPaymentRequired", - "StatusPermanentRedirect", - "StatusPreconditionFailed", - "StatusPreconditionRequired", - "StatusProcessing", - "StatusProxyAuthRequired", - "StatusRequestEntityTooLarge", - "StatusRequestHeaderFieldsTooLarge", - "StatusRequestTimeout", - "StatusRequestURITooLong", - "StatusRequestedRangeNotSatisfiable", - "StatusResetContent", - "StatusSeeOther", - "StatusServiceUnavailable", - "StatusSwitchingProtocols", - "StatusTeapot", - "StatusTemporaryRedirect", - "StatusText", - "StatusTooEarly", - "StatusTooManyRequests", - "StatusUnauthorized", - "StatusUnavailableForLegalReasons", - "StatusUnprocessableEntity", - "StatusUnsupportedMediaType", - "StatusUpgradeRequired", - "StatusUseProxy", - "StatusVariantAlsoNegotiates", - "StripPrefix", - "TimeFormat", - "TimeoutHandler", - "TrailerPrefix", - "Transport", - }, - "net/http/cgi": { - "Handler", - "Request", - "RequestFromMap", - "Serve", - }, - "net/http/cookiejar": { - "Jar", - "New", - "Options", - "PublicSuffixList", - }, - "net/http/fcgi": { - "ErrConnClosed", - "ErrRequestAborted", - "ProcessEnv", - "Serve", - }, - "net/http/httptest": { - "DefaultRemoteAddr", - "NewRecorder", - "NewRequest", - "NewServer", - "NewTLSServer", - "NewUnstartedServer", - "ResponseRecorder", - "Server", - }, - "net/http/httptrace": { - "ClientTrace", - "ContextClientTrace", - "DNSDoneInfo", - "DNSStartInfo", - "GotConnInfo", - "WithClientTrace", - "WroteRequestInfo", - }, - "net/http/httputil": { - "BufferPool", - "ClientConn", - "DumpRequest", - "DumpRequestOut", - "DumpResponse", - "ErrClosed", - "ErrLineTooLong", - "ErrPersistEOF", - "ErrPipeline", - "NewChunkedReader", - "NewChunkedWriter", - "NewClientConn", - "NewProxyClientConn", - "NewServerConn", - "NewSingleHostReverseProxy", - "ProxyRequest", - "ReverseProxy", - "ServerConn", - }, - "net/http/pprof": { - "Cmdline", - "Handler", - "Index", - "Profile", - "Symbol", - "Trace", - }, - "net/mail": { - "Address", - "AddressParser", - "ErrHeaderNotPresent", - "Header", 
- "Message", - "ParseAddress", - "ParseAddressList", - "ParseDate", - "ReadMessage", - }, - "net/netip": { - "Addr", - "AddrFrom16", - "AddrFrom4", - "AddrFromSlice", - "AddrPort", - "AddrPortFrom", - "IPv4Unspecified", - "IPv6LinkLocalAllNodes", - "IPv6LinkLocalAllRouters", - "IPv6Loopback", - "IPv6Unspecified", - "MustParseAddr", - "MustParseAddrPort", - "MustParsePrefix", - "ParseAddr", - "ParseAddrPort", - "ParsePrefix", - "Prefix", - "PrefixFrom", - }, - "net/rpc": { - "Accept", - "Call", - "Client", - "ClientCodec", - "DefaultDebugPath", - "DefaultRPCPath", - "DefaultServer", - "Dial", - "DialHTTP", - "DialHTTPPath", - "ErrShutdown", - "HandleHTTP", - "NewClient", - "NewClientWithCodec", - "NewServer", - "Register", - "RegisterName", - "Request", - "Response", - "ServeCodec", - "ServeConn", - "ServeRequest", - "Server", - "ServerCodec", - "ServerError", - }, - "net/rpc/jsonrpc": { - "Dial", - "NewClient", - "NewClientCodec", - "NewServerCodec", - "ServeConn", - }, - "net/smtp": { - "Auth", - "CRAMMD5Auth", - "Client", - "Dial", - "NewClient", - "PlainAuth", - "SendMail", - "ServerInfo", - }, - "net/textproto": { - "CanonicalMIMEHeaderKey", - "Conn", - "Dial", - "Error", - "MIMEHeader", - "NewConn", - "NewReader", - "NewWriter", - "Pipeline", - "ProtocolError", - "Reader", - "TrimBytes", - "TrimString", - "Writer", - }, - "net/url": { - "Error", - "EscapeError", - "InvalidHostError", - "JoinPath", - "Parse", - "ParseQuery", - "ParseRequestURI", - "PathEscape", - "PathUnescape", - "QueryEscape", - "QueryUnescape", - "URL", - "User", - "UserPassword", - "Userinfo", - "Values", - }, - "os": { - "Args", - "Chdir", - "Chmod", - "Chown", - "Chtimes", - "Clearenv", - "Create", - "CreateTemp", - "DevNull", - "DirEntry", - "DirFS", - "Environ", - "ErrClosed", - "ErrDeadlineExceeded", - "ErrExist", - "ErrInvalid", - "ErrNoDeadline", - "ErrNotExist", - "ErrPermission", - "ErrProcessDone", - "Executable", - "Exit", - "Expand", - "ExpandEnv", - "File", - "FileInfo", - "FileMode", - "FindProcess", - "Getegid", - "Getenv", - "Geteuid", - "Getgid", - "Getgroups", - "Getpagesize", - "Getpid", - "Getppid", - "Getuid", - "Getwd", - "Hostname", - "Interrupt", - "IsExist", - "IsNotExist", - "IsPathSeparator", - "IsPermission", - "IsTimeout", - "Kill", - "Lchown", - "Link", - "LinkError", - "LookupEnv", - "Lstat", - "Mkdir", - "MkdirAll", - "MkdirTemp", - "ModeAppend", - "ModeCharDevice", - "ModeDevice", - "ModeDir", - "ModeExclusive", - "ModeIrregular", - "ModeNamedPipe", - "ModePerm", - "ModeSetgid", - "ModeSetuid", - "ModeSocket", - "ModeSticky", - "ModeSymlink", - "ModeTemporary", - "ModeType", - "NewFile", - "NewSyscallError", - "O_APPEND", - "O_CREATE", - "O_EXCL", - "O_RDONLY", - "O_RDWR", - "O_SYNC", - "O_TRUNC", - "O_WRONLY", - "Open", - "OpenFile", - "PathError", - "PathListSeparator", - "PathSeparator", - "Pipe", - "ProcAttr", - "Process", - "ProcessState", - "ReadDir", - "ReadFile", - "Readlink", - "Remove", - "RemoveAll", - "Rename", - "SEEK_CUR", - "SEEK_END", - "SEEK_SET", - "SameFile", - "Setenv", - "Signal", - "StartProcess", - "Stat", - "Stderr", - "Stdin", - "Stdout", - "Symlink", - "SyscallError", - "TempDir", - "Truncate", - "Unsetenv", - "UserCacheDir", - "UserConfigDir", - "UserHomeDir", - "WriteFile", - }, - "os/exec": { - "Cmd", - "Command", - "CommandContext", - "ErrDot", - "ErrNotFound", - "ErrWaitDelay", - "Error", - "ExitError", - "LookPath", - }, - "os/signal": { - "Ignore", - "Ignored", - "Notify", - "NotifyContext", - "Reset", - "Stop", - }, - "os/user": { - "Current", - 
"Group", - "Lookup", - "LookupGroup", - "LookupGroupId", - "LookupId", - "UnknownGroupError", - "UnknownGroupIdError", - "UnknownUserError", - "UnknownUserIdError", - "User", - }, - "path": { - "Base", - "Clean", - "Dir", - "ErrBadPattern", - "Ext", - "IsAbs", - "Join", - "Match", - "Split", - }, - "path/filepath": { - "Abs", - "Base", - "Clean", - "Dir", - "ErrBadPattern", - "EvalSymlinks", - "Ext", - "FromSlash", - "Glob", - "HasPrefix", - "IsAbs", - "IsLocal", - "Join", - "ListSeparator", - "Match", - "Rel", - "Separator", - "SkipAll", - "SkipDir", - "Split", - "SplitList", - "ToSlash", - "VolumeName", - "Walk", - "WalkDir", - "WalkFunc", - }, - "plugin": { - "Open", - "Plugin", - "Symbol", - }, - "reflect": { - "Append", - "AppendSlice", - "Array", - "ArrayOf", - "Bool", - "BothDir", - "Chan", - "ChanDir", - "ChanOf", - "Complex128", - "Complex64", - "Copy", - "DeepEqual", - "Float32", - "Float64", - "Func", - "FuncOf", - "Indirect", - "Int", - "Int16", - "Int32", - "Int64", - "Int8", - "Interface", - "Invalid", - "Kind", - "MakeChan", - "MakeFunc", - "MakeMap", - "MakeMapWithSize", - "MakeSlice", - "Map", - "MapIter", - "MapOf", - "Method", - "New", - "NewAt", - "Pointer", - "PointerTo", - "Ptr", - "PtrTo", - "RecvDir", - "Select", - "SelectCase", - "SelectDefault", - "SelectDir", - "SelectRecv", - "SelectSend", - "SendDir", - "Slice", - "SliceHeader", - "SliceOf", - "String", - "StringHeader", - "Struct", - "StructField", - "StructOf", - "StructTag", - "Swapper", - "Type", - "TypeOf", - "Uint", - "Uint16", - "Uint32", - "Uint64", - "Uint8", - "Uintptr", - "UnsafePointer", - "Value", - "ValueError", - "ValueOf", - "VisibleFields", - "Zero", - }, - "regexp": { - "Compile", - "CompilePOSIX", - "Match", - "MatchReader", - "MatchString", - "MustCompile", - "MustCompilePOSIX", - "QuoteMeta", - "Regexp", - }, - "regexp/syntax": { - "ClassNL", - "Compile", - "DotNL", - "EmptyBeginLine", - "EmptyBeginText", - "EmptyEndLine", - "EmptyEndText", - "EmptyNoWordBoundary", - "EmptyOp", - "EmptyOpContext", - "EmptyWordBoundary", - "ErrInternalError", - "ErrInvalidCharClass", - "ErrInvalidCharRange", - "ErrInvalidEscape", - "ErrInvalidNamedCapture", - "ErrInvalidPerlOp", - "ErrInvalidRepeatOp", - "ErrInvalidRepeatSize", - "ErrInvalidUTF8", - "ErrLarge", - "ErrMissingBracket", - "ErrMissingParen", - "ErrMissingRepeatArgument", - "ErrNestingDepth", - "ErrTrailingBackslash", - "ErrUnexpectedParen", - "Error", - "ErrorCode", - "Flags", - "FoldCase", - "Inst", - "InstAlt", - "InstAltMatch", - "InstCapture", - "InstEmptyWidth", - "InstFail", - "InstMatch", - "InstNop", - "InstOp", - "InstRune", - "InstRune1", - "InstRuneAny", - "InstRuneAnyNotNL", - "IsWordChar", - "Literal", - "MatchNL", - "NonGreedy", - "OneLine", - "Op", - "OpAlternate", - "OpAnyChar", - "OpAnyCharNotNL", - "OpBeginLine", - "OpBeginText", - "OpCapture", - "OpCharClass", - "OpConcat", - "OpEmptyMatch", - "OpEndLine", - "OpEndText", - "OpLiteral", - "OpNoMatch", - "OpNoWordBoundary", - "OpPlus", - "OpQuest", - "OpRepeat", - "OpStar", - "OpWordBoundary", - "POSIX", - "Parse", - "Perl", - "PerlX", - "Prog", - "Regexp", - "Simple", - "UnicodeGroups", - "WasDollar", - }, - "runtime": { - "BlockProfile", - "BlockProfileRecord", - "Breakpoint", - "CPUProfile", - "Caller", - "Callers", - "CallersFrames", - "Compiler", - "Error", - "Frame", - "Frames", - "Func", - "FuncForPC", - "GC", - "GOARCH", - "GOMAXPROCS", - "GOOS", - "GOROOT", - "Goexit", - "GoroutineProfile", - "Gosched", - "KeepAlive", - "LockOSThread", - "MemProfile", - 
"MemProfileRate", - "MemProfileRecord", - "MemStats", - "MutexProfile", - "NumCPU", - "NumCgoCall", - "NumGoroutine", - "PanicNilError", - "Pinner", - "ReadMemStats", - "ReadTrace", - "SetBlockProfileRate", - "SetCPUProfileRate", - "SetCgoTraceback", - "SetFinalizer", - "SetMutexProfileFraction", - "Stack", - "StackRecord", - "StartTrace", - "StopTrace", - "ThreadCreateProfile", - "TypeAssertionError", - "UnlockOSThread", - "Version", - }, - "runtime/cgo": { - "Handle", - "Incomplete", - "NewHandle", - }, - "runtime/coverage": { - "ClearCounters", - "WriteCounters", - "WriteCountersDir", - "WriteMeta", - "WriteMetaDir", - }, - "runtime/debug": { - "BuildInfo", - "BuildSetting", - "FreeOSMemory", - "GCStats", - "Module", - "ParseBuildInfo", - "PrintStack", - "ReadBuildInfo", - "ReadGCStats", - "SetGCPercent", - "SetMaxStack", - "SetMaxThreads", - "SetMemoryLimit", - "SetPanicOnFault", - "SetTraceback", - "Stack", - "WriteHeapDump", - }, - "runtime/metrics": { - "All", - "Description", - "Float64Histogram", - "KindBad", - "KindFloat64", - "KindFloat64Histogram", - "KindUint64", - "Read", - "Sample", - "Value", - "ValueKind", - }, - "runtime/pprof": { - "Do", - "ForLabels", - "Label", - "LabelSet", - "Labels", - "Lookup", - "NewProfile", - "Profile", - "Profiles", - "SetGoroutineLabels", - "StartCPUProfile", - "StopCPUProfile", - "WithLabels", - "WriteHeapProfile", - }, - "runtime/trace": { - "IsEnabled", - "Log", - "Logf", - "NewTask", - "Region", - "Start", - "StartRegion", - "Stop", - "Task", - "WithRegion", - }, - "slices": { - "BinarySearch", - "BinarySearchFunc", - "Clip", - "Clone", - "Compact", - "CompactFunc", - "Compare", - "CompareFunc", - "Contains", - "ContainsFunc", - "Delete", - "DeleteFunc", - "Equal", - "EqualFunc", - "Grow", - "Index", - "IndexFunc", - "Insert", - "IsSorted", - "IsSortedFunc", - "Max", - "MaxFunc", - "Min", - "MinFunc", - "Replace", - "Reverse", - "Sort", - "SortFunc", - "SortStableFunc", - }, - "sort": { - "Find", - "Float64Slice", - "Float64s", - "Float64sAreSorted", - "IntSlice", - "Interface", - "Ints", - "IntsAreSorted", - "IsSorted", - "Reverse", - "Search", - "SearchFloat64s", - "SearchInts", - "SearchStrings", - "Slice", - "SliceIsSorted", - "SliceStable", - "Sort", - "Stable", - "StringSlice", - "Strings", - "StringsAreSorted", - }, - "strconv": { - "AppendBool", - "AppendFloat", - "AppendInt", - "AppendQuote", - "AppendQuoteRune", - "AppendQuoteRuneToASCII", - "AppendQuoteRuneToGraphic", - "AppendQuoteToASCII", - "AppendQuoteToGraphic", - "AppendUint", - "Atoi", - "CanBackquote", - "ErrRange", - "ErrSyntax", - "FormatBool", - "FormatComplex", - "FormatFloat", - "FormatInt", - "FormatUint", - "IntSize", - "IsGraphic", - "IsPrint", - "Itoa", - "NumError", - "ParseBool", - "ParseComplex", - "ParseFloat", - "ParseInt", - "ParseUint", - "Quote", - "QuoteRune", - "QuoteRuneToASCII", - "QuoteRuneToGraphic", - "QuoteToASCII", - "QuoteToGraphic", - "QuotedPrefix", - "Unquote", - "UnquoteChar", - }, - "strings": { - "Builder", - "Clone", - "Compare", - "Contains", - "ContainsAny", - "ContainsFunc", - "ContainsRune", - "Count", - "Cut", - "CutPrefix", - "CutSuffix", - "EqualFold", - "Fields", - "FieldsFunc", - "HasPrefix", - "HasSuffix", - "Index", - "IndexAny", - "IndexByte", - "IndexFunc", - "IndexRune", - "Join", - "LastIndex", - "LastIndexAny", - "LastIndexByte", - "LastIndexFunc", - "Map", - "NewReader", - "NewReplacer", - "Reader", - "Repeat", - "Replace", - "ReplaceAll", - "Replacer", - "Split", - "SplitAfter", - "SplitAfterN", - "SplitN", - "Title", - 
"ToLower", - "ToLowerSpecial", - "ToTitle", - "ToTitleSpecial", - "ToUpper", - "ToUpperSpecial", - "ToValidUTF8", - "Trim", - "TrimFunc", - "TrimLeft", - "TrimLeftFunc", - "TrimPrefix", - "TrimRight", - "TrimRightFunc", - "TrimSpace", - "TrimSuffix", - }, - "sync": { - "Cond", - "Locker", - "Map", - "Mutex", - "NewCond", - "Once", - "OnceFunc", - "OnceValue", - "OnceValues", - "Pool", - "RWMutex", - "WaitGroup", - }, - "sync/atomic": { - "AddInt32", - "AddInt64", - "AddUint32", - "AddUint64", - "AddUintptr", - "Bool", - "CompareAndSwapInt32", - "CompareAndSwapInt64", - "CompareAndSwapPointer", - "CompareAndSwapUint32", - "CompareAndSwapUint64", - "CompareAndSwapUintptr", - "Int32", - "Int64", - "LoadInt32", - "LoadInt64", - "LoadPointer", - "LoadUint32", - "LoadUint64", - "LoadUintptr", - "Pointer", - "StoreInt32", - "StoreInt64", - "StorePointer", - "StoreUint32", - "StoreUint64", - "StoreUintptr", - "SwapInt32", - "SwapInt64", - "SwapPointer", - "SwapUint32", - "SwapUint64", - "SwapUintptr", - "Uint32", - "Uint64", - "Uintptr", - "Value", - }, - "syscall": { - "AF_ALG", - "AF_APPLETALK", - "AF_ARP", - "AF_ASH", - "AF_ATM", - "AF_ATMPVC", - "AF_ATMSVC", - "AF_AX25", - "AF_BLUETOOTH", - "AF_BRIDGE", - "AF_CAIF", - "AF_CAN", - "AF_CCITT", - "AF_CHAOS", - "AF_CNT", - "AF_COIP", - "AF_DATAKIT", - "AF_DECnet", - "AF_DLI", - "AF_E164", - "AF_ECMA", - "AF_ECONET", - "AF_ENCAP", - "AF_FILE", - "AF_HYLINK", - "AF_IEEE80211", - "AF_IEEE802154", - "AF_IMPLINK", - "AF_INET", - "AF_INET6", - "AF_INET6_SDP", - "AF_INET_SDP", - "AF_IPX", - "AF_IRDA", - "AF_ISDN", - "AF_ISO", - "AF_IUCV", - "AF_KEY", - "AF_LAT", - "AF_LINK", - "AF_LLC", - "AF_LOCAL", - "AF_MAX", - "AF_MPLS", - "AF_NATM", - "AF_NDRV", - "AF_NETBEUI", - "AF_NETBIOS", - "AF_NETGRAPH", - "AF_NETLINK", - "AF_NETROM", - "AF_NS", - "AF_OROUTE", - "AF_OSI", - "AF_PACKET", - "AF_PHONET", - "AF_PPP", - "AF_PPPOX", - "AF_PUP", - "AF_RDS", - "AF_RESERVED_36", - "AF_ROSE", - "AF_ROUTE", - "AF_RXRPC", - "AF_SCLUSTER", - "AF_SECURITY", - "AF_SIP", - "AF_SLOW", - "AF_SNA", - "AF_SYSTEM", - "AF_TIPC", - "AF_UNIX", - "AF_UNSPEC", - "AF_UTUN", - "AF_VENDOR00", - "AF_VENDOR01", - "AF_VENDOR02", - "AF_VENDOR03", - "AF_VENDOR04", - "AF_VENDOR05", - "AF_VENDOR06", - "AF_VENDOR07", - "AF_VENDOR08", - "AF_VENDOR09", - "AF_VENDOR10", - "AF_VENDOR11", - "AF_VENDOR12", - "AF_VENDOR13", - "AF_VENDOR14", - "AF_VENDOR15", - "AF_VENDOR16", - "AF_VENDOR17", - "AF_VENDOR18", - "AF_VENDOR19", - "AF_VENDOR20", - "AF_VENDOR21", - "AF_VENDOR22", - "AF_VENDOR23", - "AF_VENDOR24", - "AF_VENDOR25", - "AF_VENDOR26", - "AF_VENDOR27", - "AF_VENDOR28", - "AF_VENDOR29", - "AF_VENDOR30", - "AF_VENDOR31", - "AF_VENDOR32", - "AF_VENDOR33", - "AF_VENDOR34", - "AF_VENDOR35", - "AF_VENDOR36", - "AF_VENDOR37", - "AF_VENDOR38", - "AF_VENDOR39", - "AF_VENDOR40", - "AF_VENDOR41", - "AF_VENDOR42", - "AF_VENDOR43", - "AF_VENDOR44", - "AF_VENDOR45", - "AF_VENDOR46", - "AF_VENDOR47", - "AF_WANPIPE", - "AF_X25", - "AI_CANONNAME", - "AI_NUMERICHOST", - "AI_PASSIVE", - "APPLICATION_ERROR", - "ARPHRD_ADAPT", - "ARPHRD_APPLETLK", - "ARPHRD_ARCNET", - "ARPHRD_ASH", - "ARPHRD_ATM", - "ARPHRD_AX25", - "ARPHRD_BIF", - "ARPHRD_CHAOS", - "ARPHRD_CISCO", - "ARPHRD_CSLIP", - "ARPHRD_CSLIP6", - "ARPHRD_DDCMP", - "ARPHRD_DLCI", - "ARPHRD_ECONET", - "ARPHRD_EETHER", - "ARPHRD_ETHER", - "ARPHRD_EUI64", - "ARPHRD_FCAL", - "ARPHRD_FCFABRIC", - "ARPHRD_FCPL", - "ARPHRD_FCPP", - "ARPHRD_FDDI", - "ARPHRD_FRAD", - "ARPHRD_FRELAY", - "ARPHRD_HDLC", - "ARPHRD_HIPPI", - "ARPHRD_HWX25", - "ARPHRD_IEEE1394", - 
"ARPHRD_IEEE802", - "ARPHRD_IEEE80211", - "ARPHRD_IEEE80211_PRISM", - "ARPHRD_IEEE80211_RADIOTAP", - "ARPHRD_IEEE802154", - "ARPHRD_IEEE802154_PHY", - "ARPHRD_IEEE802_TR", - "ARPHRD_INFINIBAND", - "ARPHRD_IPDDP", - "ARPHRD_IPGRE", - "ARPHRD_IRDA", - "ARPHRD_LAPB", - "ARPHRD_LOCALTLK", - "ARPHRD_LOOPBACK", - "ARPHRD_METRICOM", - "ARPHRD_NETROM", - "ARPHRD_NONE", - "ARPHRD_PIMREG", - "ARPHRD_PPP", - "ARPHRD_PRONET", - "ARPHRD_RAWHDLC", - "ARPHRD_ROSE", - "ARPHRD_RSRVD", - "ARPHRD_SIT", - "ARPHRD_SKIP", - "ARPHRD_SLIP", - "ARPHRD_SLIP6", - "ARPHRD_STRIP", - "ARPHRD_TUNNEL", - "ARPHRD_TUNNEL6", - "ARPHRD_VOID", - "ARPHRD_X25", - "AUTHTYPE_CLIENT", - "AUTHTYPE_SERVER", - "Accept", - "Accept4", - "AcceptEx", - "Access", - "Acct", - "AddrinfoW", - "Adjtime", - "Adjtimex", - "AllThreadsSyscall", - "AllThreadsSyscall6", - "AttachLsf", - "B0", - "B1000000", - "B110", - "B115200", - "B1152000", - "B1200", - "B134", - "B14400", - "B150", - "B1500000", - "B1800", - "B19200", - "B200", - "B2000000", - "B230400", - "B2400", - "B2500000", - "B28800", - "B300", - "B3000000", - "B3500000", - "B38400", - "B4000000", - "B460800", - "B4800", - "B50", - "B500000", - "B57600", - "B576000", - "B600", - "B7200", - "B75", - "B76800", - "B921600", - "B9600", - "BASE_PROTOCOL", - "BIOCFEEDBACK", - "BIOCFLUSH", - "BIOCGBLEN", - "BIOCGDIRECTION", - "BIOCGDIRFILT", - "BIOCGDLT", - "BIOCGDLTLIST", - "BIOCGETBUFMODE", - "BIOCGETIF", - "BIOCGETZMAX", - "BIOCGFEEDBACK", - "BIOCGFILDROP", - "BIOCGHDRCMPLT", - "BIOCGRSIG", - "BIOCGRTIMEOUT", - "BIOCGSEESENT", - "BIOCGSTATS", - "BIOCGSTATSOLD", - "BIOCGTSTAMP", - "BIOCIMMEDIATE", - "BIOCLOCK", - "BIOCPROMISC", - "BIOCROTZBUF", - "BIOCSBLEN", - "BIOCSDIRECTION", - "BIOCSDIRFILT", - "BIOCSDLT", - "BIOCSETBUFMODE", - "BIOCSETF", - "BIOCSETFNR", - "BIOCSETIF", - "BIOCSETWF", - "BIOCSETZBUF", - "BIOCSFEEDBACK", - "BIOCSFILDROP", - "BIOCSHDRCMPLT", - "BIOCSRSIG", - "BIOCSRTIMEOUT", - "BIOCSSEESENT", - "BIOCSTCPF", - "BIOCSTSTAMP", - "BIOCSUDPF", - "BIOCVERSION", - "BPF_A", - "BPF_ABS", - "BPF_ADD", - "BPF_ALIGNMENT", - "BPF_ALIGNMENT32", - "BPF_ALU", - "BPF_AND", - "BPF_B", - "BPF_BUFMODE_BUFFER", - "BPF_BUFMODE_ZBUF", - "BPF_DFLTBUFSIZE", - "BPF_DIRECTION_IN", - "BPF_DIRECTION_OUT", - "BPF_DIV", - "BPF_H", - "BPF_IMM", - "BPF_IND", - "BPF_JA", - "BPF_JEQ", - "BPF_JGE", - "BPF_JGT", - "BPF_JMP", - "BPF_JSET", - "BPF_K", - "BPF_LD", - "BPF_LDX", - "BPF_LEN", - "BPF_LSH", - "BPF_MAJOR_VERSION", - "BPF_MAXBUFSIZE", - "BPF_MAXINSNS", - "BPF_MEM", - "BPF_MEMWORDS", - "BPF_MINBUFSIZE", - "BPF_MINOR_VERSION", - "BPF_MISC", - "BPF_MSH", - "BPF_MUL", - "BPF_NEG", - "BPF_OR", - "BPF_RELEASE", - "BPF_RET", - "BPF_RSH", - "BPF_ST", - "BPF_STX", - "BPF_SUB", - "BPF_TAX", - "BPF_TXA", - "BPF_T_BINTIME", - "BPF_T_BINTIME_FAST", - "BPF_T_BINTIME_MONOTONIC", - "BPF_T_BINTIME_MONOTONIC_FAST", - "BPF_T_FAST", - "BPF_T_FLAG_MASK", - "BPF_T_FORMAT_MASK", - "BPF_T_MICROTIME", - "BPF_T_MICROTIME_FAST", - "BPF_T_MICROTIME_MONOTONIC", - "BPF_T_MICROTIME_MONOTONIC_FAST", - "BPF_T_MONOTONIC", - "BPF_T_MONOTONIC_FAST", - "BPF_T_NANOTIME", - "BPF_T_NANOTIME_FAST", - "BPF_T_NANOTIME_MONOTONIC", - "BPF_T_NANOTIME_MONOTONIC_FAST", - "BPF_T_NONE", - "BPF_T_NORMAL", - "BPF_W", - "BPF_X", - "BRKINT", - "Bind", - "BindToDevice", - "BpfBuflen", - "BpfDatalink", - "BpfHdr", - "BpfHeadercmpl", - "BpfInsn", - "BpfInterface", - "BpfJump", - "BpfProgram", - "BpfStat", - "BpfStats", - "BpfStmt", - "BpfTimeout", - "BpfTimeval", - "BpfVersion", - "BpfZbuf", - "BpfZbufHeader", - "ByHandleFileInformation", - "BytePtrFromString", 
- "ByteSliceFromString", - "CCR0_FLUSH", - "CERT_CHAIN_POLICY_AUTHENTICODE", - "CERT_CHAIN_POLICY_AUTHENTICODE_TS", - "CERT_CHAIN_POLICY_BASE", - "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", - "CERT_CHAIN_POLICY_EV", - "CERT_CHAIN_POLICY_MICROSOFT_ROOT", - "CERT_CHAIN_POLICY_NT_AUTH", - "CERT_CHAIN_POLICY_SSL", - "CERT_E_CN_NO_MATCH", - "CERT_E_EXPIRED", - "CERT_E_PURPOSE", - "CERT_E_ROLE", - "CERT_E_UNTRUSTEDROOT", - "CERT_STORE_ADD_ALWAYS", - "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", - "CERT_STORE_PROV_MEMORY", - "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", - "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", - "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", - "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", - "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", - "CERT_TRUST_INVALID_BASIC_CONSTRAINTS", - "CERT_TRUST_INVALID_EXTENSION", - "CERT_TRUST_INVALID_NAME_CONSTRAINTS", - "CERT_TRUST_INVALID_POLICY_CONSTRAINTS", - "CERT_TRUST_IS_CYCLIC", - "CERT_TRUST_IS_EXPLICIT_DISTRUST", - "CERT_TRUST_IS_NOT_SIGNATURE_VALID", - "CERT_TRUST_IS_NOT_TIME_VALID", - "CERT_TRUST_IS_NOT_VALID_FOR_USAGE", - "CERT_TRUST_IS_OFFLINE_REVOCATION", - "CERT_TRUST_IS_REVOKED", - "CERT_TRUST_IS_UNTRUSTED_ROOT", - "CERT_TRUST_NO_ERROR", - "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", - "CERT_TRUST_REVOCATION_STATUS_UNKNOWN", - "CFLUSH", - "CLOCAL", - "CLONE_CHILD_CLEARTID", - "CLONE_CHILD_SETTID", - "CLONE_CLEAR_SIGHAND", - "CLONE_CSIGNAL", - "CLONE_DETACHED", - "CLONE_FILES", - "CLONE_FS", - "CLONE_INTO_CGROUP", - "CLONE_IO", - "CLONE_NEWCGROUP", - "CLONE_NEWIPC", - "CLONE_NEWNET", - "CLONE_NEWNS", - "CLONE_NEWPID", - "CLONE_NEWTIME", - "CLONE_NEWUSER", - "CLONE_NEWUTS", - "CLONE_PARENT", - "CLONE_PARENT_SETTID", - "CLONE_PID", - "CLONE_PIDFD", - "CLONE_PTRACE", - "CLONE_SETTLS", - "CLONE_SIGHAND", - "CLONE_SYSVSEM", - "CLONE_THREAD", - "CLONE_UNTRACED", - "CLONE_VFORK", - "CLONE_VM", - "CPUID_CFLUSH", - "CREAD", - "CREATE_ALWAYS", - "CREATE_NEW", - "CREATE_NEW_PROCESS_GROUP", - "CREATE_UNICODE_ENVIRONMENT", - "CRYPT_DEFAULT_CONTAINER_OPTIONAL", - "CRYPT_DELETEKEYSET", - "CRYPT_MACHINE_KEYSET", - "CRYPT_NEWKEYSET", - "CRYPT_SILENT", - "CRYPT_VERIFYCONTEXT", - "CS5", - "CS6", - "CS7", - "CS8", - "CSIZE", - "CSTART", - "CSTATUS", - "CSTOP", - "CSTOPB", - "CSUSP", - "CTL_MAXNAME", - "CTL_NET", - "CTL_QUERY", - "CTRL_BREAK_EVENT", - "CTRL_CLOSE_EVENT", - "CTRL_C_EVENT", - "CTRL_LOGOFF_EVENT", - "CTRL_SHUTDOWN_EVENT", - "CancelIo", - "CancelIoEx", - "CertAddCertificateContextToStore", - "CertChainContext", - "CertChainElement", - "CertChainPara", - "CertChainPolicyPara", - "CertChainPolicyStatus", - "CertCloseStore", - "CertContext", - "CertCreateCertificateContext", - "CertEnhKeyUsage", - "CertEnumCertificatesInStore", - "CertFreeCertificateChain", - "CertFreeCertificateContext", - "CertGetCertificateChain", - "CertInfo", - "CertOpenStore", - "CertOpenSystemStore", - "CertRevocationCrlInfo", - "CertRevocationInfo", - "CertSimpleChain", - "CertTrustListInfo", - "CertTrustStatus", - "CertUsageMatch", - "CertVerifyCertificateChainPolicy", - "Chdir", - "CheckBpfVersion", - "Chflags", - "Chmod", - "Chown", - "Chroot", - "Clearenv", - "Close", - "CloseHandle", - "CloseOnExec", - "Closesocket", - "CmsgLen", - "CmsgSpace", - "Cmsghdr", - "CommandLineToArgv", - "ComputerName", - "Conn", - "Connect", - "ConnectEx", - "ConvertSidToStringSid", - "ConvertStringSidToSid", - "CopySid", - "Creat", - "CreateDirectory", - "CreateFile", - "CreateFileMapping", - "CreateHardLink", - "CreateIoCompletionPort", - "CreatePipe", - "CreateProcess", - 
"CreateProcessAsUser", - "CreateSymbolicLink", - "CreateToolhelp32Snapshot", - "Credential", - "CryptAcquireContext", - "CryptGenRandom", - "CryptReleaseContext", - "DIOCBSFLUSH", - "DIOCOSFPFLUSH", - "DLL", - "DLLError", - "DLT_A429", - "DLT_A653_ICM", - "DLT_AIRONET_HEADER", - "DLT_AOS", - "DLT_APPLE_IP_OVER_IEEE1394", - "DLT_ARCNET", - "DLT_ARCNET_LINUX", - "DLT_ATM_CLIP", - "DLT_ATM_RFC1483", - "DLT_AURORA", - "DLT_AX25", - "DLT_AX25_KISS", - "DLT_BACNET_MS_TP", - "DLT_BLUETOOTH_HCI_H4", - "DLT_BLUETOOTH_HCI_H4_WITH_PHDR", - "DLT_CAN20B", - "DLT_CAN_SOCKETCAN", - "DLT_CHAOS", - "DLT_CHDLC", - "DLT_CISCO_IOS", - "DLT_C_HDLC", - "DLT_C_HDLC_WITH_DIR", - "DLT_DBUS", - "DLT_DECT", - "DLT_DOCSIS", - "DLT_DVB_CI", - "DLT_ECONET", - "DLT_EN10MB", - "DLT_EN3MB", - "DLT_ENC", - "DLT_ERF", - "DLT_ERF_ETH", - "DLT_ERF_POS", - "DLT_FC_2", - "DLT_FC_2_WITH_FRAME_DELIMS", - "DLT_FDDI", - "DLT_FLEXRAY", - "DLT_FRELAY", - "DLT_FRELAY_WITH_DIR", - "DLT_GCOM_SERIAL", - "DLT_GCOM_T1E1", - "DLT_GPF_F", - "DLT_GPF_T", - "DLT_GPRS_LLC", - "DLT_GSMTAP_ABIS", - "DLT_GSMTAP_UM", - "DLT_HDLC", - "DLT_HHDLC", - "DLT_HIPPI", - "DLT_IBM_SN", - "DLT_IBM_SP", - "DLT_IEEE802", - "DLT_IEEE802_11", - "DLT_IEEE802_11_RADIO", - "DLT_IEEE802_11_RADIO_AVS", - "DLT_IEEE802_15_4", - "DLT_IEEE802_15_4_LINUX", - "DLT_IEEE802_15_4_NOFCS", - "DLT_IEEE802_15_4_NONASK_PHY", - "DLT_IEEE802_16_MAC_CPS", - "DLT_IEEE802_16_MAC_CPS_RADIO", - "DLT_IPFILTER", - "DLT_IPMB", - "DLT_IPMB_LINUX", - "DLT_IPNET", - "DLT_IPOIB", - "DLT_IPV4", - "DLT_IPV6", - "DLT_IP_OVER_FC", - "DLT_JUNIPER_ATM1", - "DLT_JUNIPER_ATM2", - "DLT_JUNIPER_ATM_CEMIC", - "DLT_JUNIPER_CHDLC", - "DLT_JUNIPER_ES", - "DLT_JUNIPER_ETHER", - "DLT_JUNIPER_FIBRECHANNEL", - "DLT_JUNIPER_FRELAY", - "DLT_JUNIPER_GGSN", - "DLT_JUNIPER_ISM", - "DLT_JUNIPER_MFR", - "DLT_JUNIPER_MLFR", - "DLT_JUNIPER_MLPPP", - "DLT_JUNIPER_MONITOR", - "DLT_JUNIPER_PIC_PEER", - "DLT_JUNIPER_PPP", - "DLT_JUNIPER_PPPOE", - "DLT_JUNIPER_PPPOE_ATM", - "DLT_JUNIPER_SERVICES", - "DLT_JUNIPER_SRX_E2E", - "DLT_JUNIPER_ST", - "DLT_JUNIPER_VP", - "DLT_JUNIPER_VS", - "DLT_LAPB_WITH_DIR", - "DLT_LAPD", - "DLT_LIN", - "DLT_LINUX_EVDEV", - "DLT_LINUX_IRDA", - "DLT_LINUX_LAPD", - "DLT_LINUX_PPP_WITHDIRECTION", - "DLT_LINUX_SLL", - "DLT_LOOP", - "DLT_LTALK", - "DLT_MATCHING_MAX", - "DLT_MATCHING_MIN", - "DLT_MFR", - "DLT_MOST", - "DLT_MPEG_2_TS", - "DLT_MPLS", - "DLT_MTP2", - "DLT_MTP2_WITH_PHDR", - "DLT_MTP3", - "DLT_MUX27010", - "DLT_NETANALYZER", - "DLT_NETANALYZER_TRANSPARENT", - "DLT_NFC_LLCP", - "DLT_NFLOG", - "DLT_NG40", - "DLT_NULL", - "DLT_PCI_EXP", - "DLT_PFLOG", - "DLT_PFSYNC", - "DLT_PPI", - "DLT_PPP", - "DLT_PPP_BSDOS", - "DLT_PPP_ETHER", - "DLT_PPP_PPPD", - "DLT_PPP_SERIAL", - "DLT_PPP_WITH_DIR", - "DLT_PPP_WITH_DIRECTION", - "DLT_PRISM_HEADER", - "DLT_PRONET", - "DLT_RAIF1", - "DLT_RAW", - "DLT_RAWAF_MASK", - "DLT_RIO", - "DLT_SCCP", - "DLT_SITA", - "DLT_SLIP", - "DLT_SLIP_BSDOS", - "DLT_STANAG_5066_D_PDU", - "DLT_SUNATM", - "DLT_SYMANTEC_FIREWALL", - "DLT_TZSP", - "DLT_USB", - "DLT_USB_LINUX", - "DLT_USB_LINUX_MMAPPED", - "DLT_USER0", - "DLT_USER1", - "DLT_USER10", - "DLT_USER11", - "DLT_USER12", - "DLT_USER13", - "DLT_USER14", - "DLT_USER15", - "DLT_USER2", - "DLT_USER3", - "DLT_USER4", - "DLT_USER5", - "DLT_USER6", - "DLT_USER7", - "DLT_USER8", - "DLT_USER9", - "DLT_WIHART", - "DLT_X2E_SERIAL", - "DLT_X2E_XORAYA", - "DNSMXData", - "DNSPTRData", - "DNSRecord", - "DNSSRVData", - "DNSTXTData", - "DNS_INFO_NO_RECORDS", - "DNS_TYPE_A", - "DNS_TYPE_A6", - "DNS_TYPE_AAAA", - "DNS_TYPE_ADDRS", - 
"DNS_TYPE_AFSDB", - "DNS_TYPE_ALL", - "DNS_TYPE_ANY", - "DNS_TYPE_ATMA", - "DNS_TYPE_AXFR", - "DNS_TYPE_CERT", - "DNS_TYPE_CNAME", - "DNS_TYPE_DHCID", - "DNS_TYPE_DNAME", - "DNS_TYPE_DNSKEY", - "DNS_TYPE_DS", - "DNS_TYPE_EID", - "DNS_TYPE_GID", - "DNS_TYPE_GPOS", - "DNS_TYPE_HINFO", - "DNS_TYPE_ISDN", - "DNS_TYPE_IXFR", - "DNS_TYPE_KEY", - "DNS_TYPE_KX", - "DNS_TYPE_LOC", - "DNS_TYPE_MAILA", - "DNS_TYPE_MAILB", - "DNS_TYPE_MB", - "DNS_TYPE_MD", - "DNS_TYPE_MF", - "DNS_TYPE_MG", - "DNS_TYPE_MINFO", - "DNS_TYPE_MR", - "DNS_TYPE_MX", - "DNS_TYPE_NAPTR", - "DNS_TYPE_NBSTAT", - "DNS_TYPE_NIMLOC", - "DNS_TYPE_NS", - "DNS_TYPE_NSAP", - "DNS_TYPE_NSAPPTR", - "DNS_TYPE_NSEC", - "DNS_TYPE_NULL", - "DNS_TYPE_NXT", - "DNS_TYPE_OPT", - "DNS_TYPE_PTR", - "DNS_TYPE_PX", - "DNS_TYPE_RP", - "DNS_TYPE_RRSIG", - "DNS_TYPE_RT", - "DNS_TYPE_SIG", - "DNS_TYPE_SINK", - "DNS_TYPE_SOA", - "DNS_TYPE_SRV", - "DNS_TYPE_TEXT", - "DNS_TYPE_TKEY", - "DNS_TYPE_TSIG", - "DNS_TYPE_UID", - "DNS_TYPE_UINFO", - "DNS_TYPE_UNSPEC", - "DNS_TYPE_WINS", - "DNS_TYPE_WINSR", - "DNS_TYPE_WKS", - "DNS_TYPE_X25", - "DT_BLK", - "DT_CHR", - "DT_DIR", - "DT_FIFO", - "DT_LNK", - "DT_REG", - "DT_SOCK", - "DT_UNKNOWN", - "DT_WHT", - "DUPLICATE_CLOSE_SOURCE", - "DUPLICATE_SAME_ACCESS", - "DeleteFile", - "DetachLsf", - "DeviceIoControl", - "Dirent", - "DnsNameCompare", - "DnsQuery", - "DnsRecordListFree", - "DnsSectionAdditional", - "DnsSectionAnswer", - "DnsSectionAuthority", - "DnsSectionQuestion", - "Dup", - "Dup2", - "Dup3", - "DuplicateHandle", - "E2BIG", - "EACCES", - "EADDRINUSE", - "EADDRNOTAVAIL", - "EADV", - "EAFNOSUPPORT", - "EAGAIN", - "EALREADY", - "EAUTH", - "EBADARCH", - "EBADE", - "EBADEXEC", - "EBADF", - "EBADFD", - "EBADMACHO", - "EBADMSG", - "EBADR", - "EBADRPC", - "EBADRQC", - "EBADSLT", - "EBFONT", - "EBUSY", - "ECANCELED", - "ECAPMODE", - "ECHILD", - "ECHO", - "ECHOCTL", - "ECHOE", - "ECHOK", - "ECHOKE", - "ECHONL", - "ECHOPRT", - "ECHRNG", - "ECOMM", - "ECONNABORTED", - "ECONNREFUSED", - "ECONNRESET", - "EDEADLK", - "EDEADLOCK", - "EDESTADDRREQ", - "EDEVERR", - "EDOM", - "EDOOFUS", - "EDOTDOT", - "EDQUOT", - "EEXIST", - "EFAULT", - "EFBIG", - "EFER_LMA", - "EFER_LME", - "EFER_NXE", - "EFER_SCE", - "EFTYPE", - "EHOSTDOWN", - "EHOSTUNREACH", - "EHWPOISON", - "EIDRM", - "EILSEQ", - "EINPROGRESS", - "EINTR", - "EINVAL", - "EIO", - "EIPSEC", - "EISCONN", - "EISDIR", - "EISNAM", - "EKEYEXPIRED", - "EKEYREJECTED", - "EKEYREVOKED", - "EL2HLT", - "EL2NSYNC", - "EL3HLT", - "EL3RST", - "ELAST", - "ELF_NGREG", - "ELF_PRARGSZ", - "ELIBACC", - "ELIBBAD", - "ELIBEXEC", - "ELIBMAX", - "ELIBSCN", - "ELNRNG", - "ELOOP", - "EMEDIUMTYPE", - "EMFILE", - "EMLINK", - "EMSGSIZE", - "EMT_TAGOVF", - "EMULTIHOP", - "EMUL_ENABLED", - "EMUL_LINUX", - "EMUL_LINUX32", - "EMUL_MAXID", - "EMUL_NATIVE", - "ENAMETOOLONG", - "ENAVAIL", - "ENDRUNDISC", - "ENEEDAUTH", - "ENETDOWN", - "ENETRESET", - "ENETUNREACH", - "ENFILE", - "ENOANO", - "ENOATTR", - "ENOBUFS", - "ENOCSI", - "ENODATA", - "ENODEV", - "ENOENT", - "ENOEXEC", - "ENOKEY", - "ENOLCK", - "ENOLINK", - "ENOMEDIUM", - "ENOMEM", - "ENOMSG", - "ENONET", - "ENOPKG", - "ENOPOLICY", - "ENOPROTOOPT", - "ENOSPC", - "ENOSR", - "ENOSTR", - "ENOSYS", - "ENOTBLK", - "ENOTCAPABLE", - "ENOTCONN", - "ENOTDIR", - "ENOTEMPTY", - "ENOTNAM", - "ENOTRECOVERABLE", - "ENOTSOCK", - "ENOTSUP", - "ENOTTY", - "ENOTUNIQ", - "ENXIO", - "EN_SW_CTL_INF", - "EN_SW_CTL_PREC", - "EN_SW_CTL_ROUND", - "EN_SW_DATACHAIN", - "EN_SW_DENORM", - "EN_SW_INVOP", - "EN_SW_OVERFLOW", - "EN_SW_PRECLOSS", - "EN_SW_UNDERFLOW", - "EN_SW_ZERODIV", 
- "EOPNOTSUPP", - "EOVERFLOW", - "EOWNERDEAD", - "EPERM", - "EPFNOSUPPORT", - "EPIPE", - "EPOLLERR", - "EPOLLET", - "EPOLLHUP", - "EPOLLIN", - "EPOLLMSG", - "EPOLLONESHOT", - "EPOLLOUT", - "EPOLLPRI", - "EPOLLRDBAND", - "EPOLLRDHUP", - "EPOLLRDNORM", - "EPOLLWRBAND", - "EPOLLWRNORM", - "EPOLL_CLOEXEC", - "EPOLL_CTL_ADD", - "EPOLL_CTL_DEL", - "EPOLL_CTL_MOD", - "EPOLL_NONBLOCK", - "EPROCLIM", - "EPROCUNAVAIL", - "EPROGMISMATCH", - "EPROGUNAVAIL", - "EPROTO", - "EPROTONOSUPPORT", - "EPROTOTYPE", - "EPWROFF", - "EQFULL", - "ERANGE", - "EREMCHG", - "EREMOTE", - "EREMOTEIO", - "ERESTART", - "ERFKILL", - "EROFS", - "ERPCMISMATCH", - "ERROR_ACCESS_DENIED", - "ERROR_ALREADY_EXISTS", - "ERROR_BROKEN_PIPE", - "ERROR_BUFFER_OVERFLOW", - "ERROR_DIR_NOT_EMPTY", - "ERROR_ENVVAR_NOT_FOUND", - "ERROR_FILE_EXISTS", - "ERROR_FILE_NOT_FOUND", - "ERROR_HANDLE_EOF", - "ERROR_INSUFFICIENT_BUFFER", - "ERROR_IO_PENDING", - "ERROR_MOD_NOT_FOUND", - "ERROR_MORE_DATA", - "ERROR_NETNAME_DELETED", - "ERROR_NOT_FOUND", - "ERROR_NO_MORE_FILES", - "ERROR_OPERATION_ABORTED", - "ERROR_PATH_NOT_FOUND", - "ERROR_PRIVILEGE_NOT_HELD", - "ERROR_PROC_NOT_FOUND", - "ESHLIBVERS", - "ESHUTDOWN", - "ESOCKTNOSUPPORT", - "ESPIPE", - "ESRCH", - "ESRMNT", - "ESTALE", - "ESTRPIPE", - "ETHERCAP_JUMBO_MTU", - "ETHERCAP_VLAN_HWTAGGING", - "ETHERCAP_VLAN_MTU", - "ETHERMIN", - "ETHERMTU", - "ETHERMTU_JUMBO", - "ETHERTYPE_8023", - "ETHERTYPE_AARP", - "ETHERTYPE_ACCTON", - "ETHERTYPE_AEONIC", - "ETHERTYPE_ALPHA", - "ETHERTYPE_AMBER", - "ETHERTYPE_AMOEBA", - "ETHERTYPE_AOE", - "ETHERTYPE_APOLLO", - "ETHERTYPE_APOLLODOMAIN", - "ETHERTYPE_APPLETALK", - "ETHERTYPE_APPLITEK", - "ETHERTYPE_ARGONAUT", - "ETHERTYPE_ARP", - "ETHERTYPE_AT", - "ETHERTYPE_ATALK", - "ETHERTYPE_ATOMIC", - "ETHERTYPE_ATT", - "ETHERTYPE_ATTSTANFORD", - "ETHERTYPE_AUTOPHON", - "ETHERTYPE_AXIS", - "ETHERTYPE_BCLOOP", - "ETHERTYPE_BOFL", - "ETHERTYPE_CABLETRON", - "ETHERTYPE_CHAOS", - "ETHERTYPE_COMDESIGN", - "ETHERTYPE_COMPUGRAPHIC", - "ETHERTYPE_COUNTERPOINT", - "ETHERTYPE_CRONUS", - "ETHERTYPE_CRONUSVLN", - "ETHERTYPE_DCA", - "ETHERTYPE_DDE", - "ETHERTYPE_DEBNI", - "ETHERTYPE_DECAM", - "ETHERTYPE_DECCUST", - "ETHERTYPE_DECDIAG", - "ETHERTYPE_DECDNS", - "ETHERTYPE_DECDTS", - "ETHERTYPE_DECEXPER", - "ETHERTYPE_DECLAST", - "ETHERTYPE_DECLTM", - "ETHERTYPE_DECMUMPS", - "ETHERTYPE_DECNETBIOS", - "ETHERTYPE_DELTACON", - "ETHERTYPE_DIDDLE", - "ETHERTYPE_DLOG1", - "ETHERTYPE_DLOG2", - "ETHERTYPE_DN", - "ETHERTYPE_DOGFIGHT", - "ETHERTYPE_DSMD", - "ETHERTYPE_ECMA", - "ETHERTYPE_ENCRYPT", - "ETHERTYPE_ES", - "ETHERTYPE_EXCELAN", - "ETHERTYPE_EXPERDATA", - "ETHERTYPE_FLIP", - "ETHERTYPE_FLOWCONTROL", - "ETHERTYPE_FRARP", - "ETHERTYPE_GENDYN", - "ETHERTYPE_HAYES", - "ETHERTYPE_HIPPI_FP", - "ETHERTYPE_HITACHI", - "ETHERTYPE_HP", - "ETHERTYPE_IEEEPUP", - "ETHERTYPE_IEEEPUPAT", - "ETHERTYPE_IMLBL", - "ETHERTYPE_IMLBLDIAG", - "ETHERTYPE_IP", - "ETHERTYPE_IPAS", - "ETHERTYPE_IPV6", - "ETHERTYPE_IPX", - "ETHERTYPE_IPXNEW", - "ETHERTYPE_KALPANA", - "ETHERTYPE_LANBRIDGE", - "ETHERTYPE_LANPROBE", - "ETHERTYPE_LAT", - "ETHERTYPE_LBACK", - "ETHERTYPE_LITTLE", - "ETHERTYPE_LLDP", - "ETHERTYPE_LOGICRAFT", - "ETHERTYPE_LOOPBACK", - "ETHERTYPE_MATRA", - "ETHERTYPE_MAX", - "ETHERTYPE_MERIT", - "ETHERTYPE_MICP", - "ETHERTYPE_MOPDL", - "ETHERTYPE_MOPRC", - "ETHERTYPE_MOTOROLA", - "ETHERTYPE_MPLS", - "ETHERTYPE_MPLS_MCAST", - "ETHERTYPE_MUMPS", - "ETHERTYPE_NBPCC", - "ETHERTYPE_NBPCLAIM", - "ETHERTYPE_NBPCLREQ", - "ETHERTYPE_NBPCLRSP", - "ETHERTYPE_NBPCREQ", - "ETHERTYPE_NBPCRSP", - "ETHERTYPE_NBPDG", - 
"ETHERTYPE_NBPDGB", - "ETHERTYPE_NBPDLTE", - "ETHERTYPE_NBPRAR", - "ETHERTYPE_NBPRAS", - "ETHERTYPE_NBPRST", - "ETHERTYPE_NBPSCD", - "ETHERTYPE_NBPVCD", - "ETHERTYPE_NBS", - "ETHERTYPE_NCD", - "ETHERTYPE_NESTAR", - "ETHERTYPE_NETBEUI", - "ETHERTYPE_NOVELL", - "ETHERTYPE_NS", - "ETHERTYPE_NSAT", - "ETHERTYPE_NSCOMPAT", - "ETHERTYPE_NTRAILER", - "ETHERTYPE_OS9", - "ETHERTYPE_OS9NET", - "ETHERTYPE_PACER", - "ETHERTYPE_PAE", - "ETHERTYPE_PCS", - "ETHERTYPE_PLANNING", - "ETHERTYPE_PPP", - "ETHERTYPE_PPPOE", - "ETHERTYPE_PPPOEDISC", - "ETHERTYPE_PRIMENTS", - "ETHERTYPE_PUP", - "ETHERTYPE_PUPAT", - "ETHERTYPE_QINQ", - "ETHERTYPE_RACAL", - "ETHERTYPE_RATIONAL", - "ETHERTYPE_RAWFR", - "ETHERTYPE_RCL", - "ETHERTYPE_RDP", - "ETHERTYPE_RETIX", - "ETHERTYPE_REVARP", - "ETHERTYPE_SCA", - "ETHERTYPE_SECTRA", - "ETHERTYPE_SECUREDATA", - "ETHERTYPE_SGITW", - "ETHERTYPE_SG_BOUNCE", - "ETHERTYPE_SG_DIAG", - "ETHERTYPE_SG_NETGAMES", - "ETHERTYPE_SG_RESV", - "ETHERTYPE_SIMNET", - "ETHERTYPE_SLOW", - "ETHERTYPE_SLOWPROTOCOLS", - "ETHERTYPE_SNA", - "ETHERTYPE_SNMP", - "ETHERTYPE_SONIX", - "ETHERTYPE_SPIDER", - "ETHERTYPE_SPRITE", - "ETHERTYPE_STP", - "ETHERTYPE_TALARIS", - "ETHERTYPE_TALARISMC", - "ETHERTYPE_TCPCOMP", - "ETHERTYPE_TCPSM", - "ETHERTYPE_TEC", - "ETHERTYPE_TIGAN", - "ETHERTYPE_TRAIL", - "ETHERTYPE_TRANSETHER", - "ETHERTYPE_TYMSHARE", - "ETHERTYPE_UBBST", - "ETHERTYPE_UBDEBUG", - "ETHERTYPE_UBDIAGLOOP", - "ETHERTYPE_UBDL", - "ETHERTYPE_UBNIU", - "ETHERTYPE_UBNMC", - "ETHERTYPE_VALID", - "ETHERTYPE_VARIAN", - "ETHERTYPE_VAXELN", - "ETHERTYPE_VEECO", - "ETHERTYPE_VEXP", - "ETHERTYPE_VGLAB", - "ETHERTYPE_VINES", - "ETHERTYPE_VINESECHO", - "ETHERTYPE_VINESLOOP", - "ETHERTYPE_VITAL", - "ETHERTYPE_VLAN", - "ETHERTYPE_VLTLMAN", - "ETHERTYPE_VPROD", - "ETHERTYPE_VURESERVED", - "ETHERTYPE_WATERLOO", - "ETHERTYPE_WELLFLEET", - "ETHERTYPE_X25", - "ETHERTYPE_X75", - "ETHERTYPE_XNSSM", - "ETHERTYPE_XTP", - "ETHER_ADDR_LEN", - "ETHER_ALIGN", - "ETHER_CRC_LEN", - "ETHER_CRC_POLY_BE", - "ETHER_CRC_POLY_LE", - "ETHER_HDR_LEN", - "ETHER_MAX_DIX_LEN", - "ETHER_MAX_LEN", - "ETHER_MAX_LEN_JUMBO", - "ETHER_MIN_LEN", - "ETHER_PPPOE_ENCAP_LEN", - "ETHER_TYPE_LEN", - "ETHER_VLAN_ENCAP_LEN", - "ETH_P_1588", - "ETH_P_8021Q", - "ETH_P_802_2", - "ETH_P_802_3", - "ETH_P_AARP", - "ETH_P_ALL", - "ETH_P_AOE", - "ETH_P_ARCNET", - "ETH_P_ARP", - "ETH_P_ATALK", - "ETH_P_ATMFATE", - "ETH_P_ATMMPOA", - "ETH_P_AX25", - "ETH_P_BPQ", - "ETH_P_CAIF", - "ETH_P_CAN", - "ETH_P_CONTROL", - "ETH_P_CUST", - "ETH_P_DDCMP", - "ETH_P_DEC", - "ETH_P_DIAG", - "ETH_P_DNA_DL", - "ETH_P_DNA_RC", - "ETH_P_DNA_RT", - "ETH_P_DSA", - "ETH_P_ECONET", - "ETH_P_EDSA", - "ETH_P_FCOE", - "ETH_P_FIP", - "ETH_P_HDLC", - "ETH_P_IEEE802154", - "ETH_P_IEEEPUP", - "ETH_P_IEEEPUPAT", - "ETH_P_IP", - "ETH_P_IPV6", - "ETH_P_IPX", - "ETH_P_IRDA", - "ETH_P_LAT", - "ETH_P_LINK_CTL", - "ETH_P_LOCALTALK", - "ETH_P_LOOP", - "ETH_P_MOBITEX", - "ETH_P_MPLS_MC", - "ETH_P_MPLS_UC", - "ETH_P_PAE", - "ETH_P_PAUSE", - "ETH_P_PHONET", - "ETH_P_PPPTALK", - "ETH_P_PPP_DISC", - "ETH_P_PPP_MP", - "ETH_P_PPP_SES", - "ETH_P_PUP", - "ETH_P_PUPAT", - "ETH_P_RARP", - "ETH_P_SCA", - "ETH_P_SLOW", - "ETH_P_SNAP", - "ETH_P_TEB", - "ETH_P_TIPC", - "ETH_P_TRAILER", - "ETH_P_TR_802_2", - "ETH_P_WAN_PPP", - "ETH_P_WCCP", - "ETH_P_X25", - "ETIME", - "ETIMEDOUT", - "ETOOMANYREFS", - "ETXTBSY", - "EUCLEAN", - "EUNATCH", - "EUSERS", - "EVFILT_AIO", - "EVFILT_FS", - "EVFILT_LIO", - "EVFILT_MACHPORT", - "EVFILT_PROC", - "EVFILT_READ", - "EVFILT_SIGNAL", - "EVFILT_SYSCOUNT", - "EVFILT_THREADMARKER", - 
"EVFILT_TIMER", - "EVFILT_USER", - "EVFILT_VM", - "EVFILT_VNODE", - "EVFILT_WRITE", - "EV_ADD", - "EV_CLEAR", - "EV_DELETE", - "EV_DISABLE", - "EV_DISPATCH", - "EV_DROP", - "EV_ENABLE", - "EV_EOF", - "EV_ERROR", - "EV_FLAG0", - "EV_FLAG1", - "EV_ONESHOT", - "EV_OOBAND", - "EV_POLL", - "EV_RECEIPT", - "EV_SYSFLAGS", - "EWINDOWS", - "EWOULDBLOCK", - "EXDEV", - "EXFULL", - "EXTA", - "EXTB", - "EXTPROC", - "Environ", - "EpollCreate", - "EpollCreate1", - "EpollCtl", - "EpollEvent", - "EpollWait", - "Errno", - "EscapeArg", - "Exchangedata", - "Exec", - "Exit", - "ExitProcess", - "FD_CLOEXEC", - "FD_SETSIZE", - "FILE_ACTION_ADDED", - "FILE_ACTION_MODIFIED", - "FILE_ACTION_REMOVED", - "FILE_ACTION_RENAMED_NEW_NAME", - "FILE_ACTION_RENAMED_OLD_NAME", - "FILE_APPEND_DATA", - "FILE_ATTRIBUTE_ARCHIVE", - "FILE_ATTRIBUTE_DIRECTORY", - "FILE_ATTRIBUTE_HIDDEN", - "FILE_ATTRIBUTE_NORMAL", - "FILE_ATTRIBUTE_READONLY", - "FILE_ATTRIBUTE_REPARSE_POINT", - "FILE_ATTRIBUTE_SYSTEM", - "FILE_BEGIN", - "FILE_CURRENT", - "FILE_END", - "FILE_FLAG_BACKUP_SEMANTICS", - "FILE_FLAG_OPEN_REPARSE_POINT", - "FILE_FLAG_OVERLAPPED", - "FILE_LIST_DIRECTORY", - "FILE_MAP_COPY", - "FILE_MAP_EXECUTE", - "FILE_MAP_READ", - "FILE_MAP_WRITE", - "FILE_NOTIFY_CHANGE_ATTRIBUTES", - "FILE_NOTIFY_CHANGE_CREATION", - "FILE_NOTIFY_CHANGE_DIR_NAME", - "FILE_NOTIFY_CHANGE_FILE_NAME", - "FILE_NOTIFY_CHANGE_LAST_ACCESS", - "FILE_NOTIFY_CHANGE_LAST_WRITE", - "FILE_NOTIFY_CHANGE_SIZE", - "FILE_SHARE_DELETE", - "FILE_SHARE_READ", - "FILE_SHARE_WRITE", - "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", - "FILE_SKIP_SET_EVENT_ON_HANDLE", - "FILE_TYPE_CHAR", - "FILE_TYPE_DISK", - "FILE_TYPE_PIPE", - "FILE_TYPE_REMOTE", - "FILE_TYPE_UNKNOWN", - "FILE_WRITE_ATTRIBUTES", - "FLUSHO", - "FORMAT_MESSAGE_ALLOCATE_BUFFER", - "FORMAT_MESSAGE_ARGUMENT_ARRAY", - "FORMAT_MESSAGE_FROM_HMODULE", - "FORMAT_MESSAGE_FROM_STRING", - "FORMAT_MESSAGE_FROM_SYSTEM", - "FORMAT_MESSAGE_IGNORE_INSERTS", - "FORMAT_MESSAGE_MAX_WIDTH_MASK", - "FSCTL_GET_REPARSE_POINT", - "F_ADDFILESIGS", - "F_ADDSIGS", - "F_ALLOCATEALL", - "F_ALLOCATECONTIG", - "F_CANCEL", - "F_CHKCLEAN", - "F_CLOSEM", - "F_DUP2FD", - "F_DUP2FD_CLOEXEC", - "F_DUPFD", - "F_DUPFD_CLOEXEC", - "F_EXLCK", - "F_FINDSIGS", - "F_FLUSH_DATA", - "F_FREEZE_FS", - "F_FSCTL", - "F_FSDIRMASK", - "F_FSIN", - "F_FSINOUT", - "F_FSOUT", - "F_FSPRIV", - "F_FSVOID", - "F_FULLFSYNC", - "F_GETCODEDIR", - "F_GETFD", - "F_GETFL", - "F_GETLEASE", - "F_GETLK", - "F_GETLK64", - "F_GETLKPID", - "F_GETNOSIGPIPE", - "F_GETOWN", - "F_GETOWN_EX", - "F_GETPATH", - "F_GETPATH_MTMINFO", - "F_GETPIPE_SZ", - "F_GETPROTECTIONCLASS", - "F_GETPROTECTIONLEVEL", - "F_GETSIG", - "F_GLOBAL_NOCACHE", - "F_LOCK", - "F_LOG2PHYS", - "F_LOG2PHYS_EXT", - "F_MARKDEPENDENCY", - "F_MAXFD", - "F_NOCACHE", - "F_NODIRECT", - "F_NOTIFY", - "F_OGETLK", - "F_OK", - "F_OSETLK", - "F_OSETLKW", - "F_PARAM_MASK", - "F_PARAM_MAX", - "F_PATHPKG_CHECK", - "F_PEOFPOSMODE", - "F_PREALLOCATE", - "F_RDADVISE", - "F_RDAHEAD", - "F_RDLCK", - "F_READAHEAD", - "F_READBOOTSTRAP", - "F_SETBACKINGSTORE", - "F_SETFD", - "F_SETFL", - "F_SETLEASE", - "F_SETLK", - "F_SETLK64", - "F_SETLKW", - "F_SETLKW64", - "F_SETLKWTIMEOUT", - "F_SETLK_REMOTE", - "F_SETNOSIGPIPE", - "F_SETOWN", - "F_SETOWN_EX", - "F_SETPIPE_SZ", - "F_SETPROTECTIONCLASS", - "F_SETSIG", - "F_SETSIZE", - "F_SHLCK", - "F_SINGLE_WRITER", - "F_TEST", - "F_THAW_FS", - "F_TLOCK", - "F_TRANSCODEKEY", - "F_ULOCK", - "F_UNLCK", - "F_UNLCKSYS", - "F_VOLPOSMODE", - "F_WRITEBOOTSTRAP", - "F_WRLCK", - "Faccessat", - "Fallocate", - 
"Fbootstraptransfer_t", - "Fchdir", - "Fchflags", - "Fchmod", - "Fchmodat", - "Fchown", - "Fchownat", - "FcntlFlock", - "FdSet", - "Fdatasync", - "FileNotifyInformation", - "Filetime", - "FindClose", - "FindFirstFile", - "FindNextFile", - "Flock", - "Flock_t", - "FlushBpf", - "FlushFileBuffers", - "FlushViewOfFile", - "ForkExec", - "ForkLock", - "FormatMessage", - "Fpathconf", - "FreeAddrInfoW", - "FreeEnvironmentStrings", - "FreeLibrary", - "Fsid", - "Fstat", - "Fstatat", - "Fstatfs", - "Fstore_t", - "Fsync", - "Ftruncate", - "FullPath", - "Futimes", - "Futimesat", - "GENERIC_ALL", - "GENERIC_EXECUTE", - "GENERIC_READ", - "GENERIC_WRITE", - "GUID", - "GetAcceptExSockaddrs", - "GetAdaptersInfo", - "GetAddrInfoW", - "GetCommandLine", - "GetComputerName", - "GetConsoleMode", - "GetCurrentDirectory", - "GetCurrentProcess", - "GetEnvironmentStrings", - "GetEnvironmentVariable", - "GetExitCodeProcess", - "GetFileAttributes", - "GetFileAttributesEx", - "GetFileExInfoStandard", - "GetFileExMaxInfoLevel", - "GetFileInformationByHandle", - "GetFileType", - "GetFullPathName", - "GetHostByName", - "GetIfEntry", - "GetLastError", - "GetLengthSid", - "GetLongPathName", - "GetProcAddress", - "GetProcessTimes", - "GetProtoByName", - "GetQueuedCompletionStatus", - "GetServByName", - "GetShortPathName", - "GetStartupInfo", - "GetStdHandle", - "GetSystemTimeAsFileTime", - "GetTempPath", - "GetTimeZoneInformation", - "GetTokenInformation", - "GetUserNameEx", - "GetUserProfileDirectory", - "GetVersion", - "Getcwd", - "Getdents", - "Getdirentries", - "Getdtablesize", - "Getegid", - "Getenv", - "Geteuid", - "Getfsstat", - "Getgid", - "Getgroups", - "Getpagesize", - "Getpeername", - "Getpgid", - "Getpgrp", - "Getpid", - "Getppid", - "Getpriority", - "Getrlimit", - "Getrusage", - "Getsid", - "Getsockname", - "Getsockopt", - "GetsockoptByte", - "GetsockoptICMPv6Filter", - "GetsockoptIPMreq", - "GetsockoptIPMreqn", - "GetsockoptIPv6MTUInfo", - "GetsockoptIPv6Mreq", - "GetsockoptInet4Addr", - "GetsockoptInt", - "GetsockoptUcred", - "Gettid", - "Gettimeofday", - "Getuid", - "Getwd", - "Getxattr", - "HANDLE_FLAG_INHERIT", - "HKEY_CLASSES_ROOT", - "HKEY_CURRENT_CONFIG", - "HKEY_CURRENT_USER", - "HKEY_DYN_DATA", - "HKEY_LOCAL_MACHINE", - "HKEY_PERFORMANCE_DATA", - "HKEY_USERS", - "HUPCL", - "Handle", - "Hostent", - "ICANON", - "ICMP6_FILTER", - "ICMPV6_FILTER", - "ICMPv6Filter", - "ICRNL", - "IEXTEN", - "IFAN_ARRIVAL", - "IFAN_DEPARTURE", - "IFA_ADDRESS", - "IFA_ANYCAST", - "IFA_BROADCAST", - "IFA_CACHEINFO", - "IFA_F_DADFAILED", - "IFA_F_DEPRECATED", - "IFA_F_HOMEADDRESS", - "IFA_F_NODAD", - "IFA_F_OPTIMISTIC", - "IFA_F_PERMANENT", - "IFA_F_SECONDARY", - "IFA_F_TEMPORARY", - "IFA_F_TENTATIVE", - "IFA_LABEL", - "IFA_LOCAL", - "IFA_MAX", - "IFA_MULTICAST", - "IFA_ROUTE", - "IFA_UNSPEC", - "IFF_ALLMULTI", - "IFF_ALTPHYS", - "IFF_AUTOMEDIA", - "IFF_BROADCAST", - "IFF_CANTCHANGE", - "IFF_CANTCONFIG", - "IFF_DEBUG", - "IFF_DRV_OACTIVE", - "IFF_DRV_RUNNING", - "IFF_DYING", - "IFF_DYNAMIC", - "IFF_LINK0", - "IFF_LINK1", - "IFF_LINK2", - "IFF_LOOPBACK", - "IFF_MASTER", - "IFF_MONITOR", - "IFF_MULTICAST", - "IFF_NOARP", - "IFF_NOTRAILERS", - "IFF_NO_PI", - "IFF_OACTIVE", - "IFF_ONE_QUEUE", - "IFF_POINTOPOINT", - "IFF_POINTTOPOINT", - "IFF_PORTSEL", - "IFF_PPROMISC", - "IFF_PROMISC", - "IFF_RENAMING", - "IFF_RUNNING", - "IFF_SIMPLEX", - "IFF_SLAVE", - "IFF_SMART", - "IFF_STATICARP", - "IFF_TAP", - "IFF_TUN", - "IFF_TUN_EXCL", - "IFF_UP", - "IFF_VNET_HDR", - "IFLA_ADDRESS", - "IFLA_BROADCAST", - "IFLA_COST", - "IFLA_IFALIAS", - 
"IFLA_IFNAME", - "IFLA_LINK", - "IFLA_LINKINFO", - "IFLA_LINKMODE", - "IFLA_MAP", - "IFLA_MASTER", - "IFLA_MAX", - "IFLA_MTU", - "IFLA_NET_NS_PID", - "IFLA_OPERSTATE", - "IFLA_PRIORITY", - "IFLA_PROTINFO", - "IFLA_QDISC", - "IFLA_STATS", - "IFLA_TXQLEN", - "IFLA_UNSPEC", - "IFLA_WEIGHT", - "IFLA_WIRELESS", - "IFNAMSIZ", - "IFT_1822", - "IFT_A12MPPSWITCH", - "IFT_AAL2", - "IFT_AAL5", - "IFT_ADSL", - "IFT_AFLANE8023", - "IFT_AFLANE8025", - "IFT_ARAP", - "IFT_ARCNET", - "IFT_ARCNETPLUS", - "IFT_ASYNC", - "IFT_ATM", - "IFT_ATMDXI", - "IFT_ATMFUNI", - "IFT_ATMIMA", - "IFT_ATMLOGICAL", - "IFT_ATMRADIO", - "IFT_ATMSUBINTERFACE", - "IFT_ATMVCIENDPT", - "IFT_ATMVIRTUAL", - "IFT_BGPPOLICYACCOUNTING", - "IFT_BLUETOOTH", - "IFT_BRIDGE", - "IFT_BSC", - "IFT_CARP", - "IFT_CCTEMUL", - "IFT_CELLULAR", - "IFT_CEPT", - "IFT_CES", - "IFT_CHANNEL", - "IFT_CNR", - "IFT_COFFEE", - "IFT_COMPOSITELINK", - "IFT_DCN", - "IFT_DIGITALPOWERLINE", - "IFT_DIGITALWRAPPEROVERHEADCHANNEL", - "IFT_DLSW", - "IFT_DOCSCABLEDOWNSTREAM", - "IFT_DOCSCABLEMACLAYER", - "IFT_DOCSCABLEUPSTREAM", - "IFT_DOCSCABLEUPSTREAMCHANNEL", - "IFT_DS0", - "IFT_DS0BUNDLE", - "IFT_DS1FDL", - "IFT_DS3", - "IFT_DTM", - "IFT_DUMMY", - "IFT_DVBASILN", - "IFT_DVBASIOUT", - "IFT_DVBRCCDOWNSTREAM", - "IFT_DVBRCCMACLAYER", - "IFT_DVBRCCUPSTREAM", - "IFT_ECONET", - "IFT_ENC", - "IFT_EON", - "IFT_EPLRS", - "IFT_ESCON", - "IFT_ETHER", - "IFT_FAITH", - "IFT_FAST", - "IFT_FASTETHER", - "IFT_FASTETHERFX", - "IFT_FDDI", - "IFT_FIBRECHANNEL", - "IFT_FRAMERELAYINTERCONNECT", - "IFT_FRAMERELAYMPI", - "IFT_FRDLCIENDPT", - "IFT_FRELAY", - "IFT_FRELAYDCE", - "IFT_FRF16MFRBUNDLE", - "IFT_FRFORWARD", - "IFT_G703AT2MB", - "IFT_G703AT64K", - "IFT_GIF", - "IFT_GIGABITETHERNET", - "IFT_GR303IDT", - "IFT_GR303RDT", - "IFT_H323GATEKEEPER", - "IFT_H323PROXY", - "IFT_HDH1822", - "IFT_HDLC", - "IFT_HDSL2", - "IFT_HIPERLAN2", - "IFT_HIPPI", - "IFT_HIPPIINTERFACE", - "IFT_HOSTPAD", - "IFT_HSSI", - "IFT_HY", - "IFT_IBM370PARCHAN", - "IFT_IDSL", - "IFT_IEEE1394", - "IFT_IEEE80211", - "IFT_IEEE80212", - "IFT_IEEE8023ADLAG", - "IFT_IFGSN", - "IFT_IMT", - "IFT_INFINIBAND", - "IFT_INTERLEAVE", - "IFT_IP", - "IFT_IPFORWARD", - "IFT_IPOVERATM", - "IFT_IPOVERCDLC", - "IFT_IPOVERCLAW", - "IFT_IPSWITCH", - "IFT_IPXIP", - "IFT_ISDN", - "IFT_ISDNBASIC", - "IFT_ISDNPRIMARY", - "IFT_ISDNS", - "IFT_ISDNU", - "IFT_ISO88022LLC", - "IFT_ISO88023", - "IFT_ISO88024", - "IFT_ISO88025", - "IFT_ISO88025CRFPINT", - "IFT_ISO88025DTR", - "IFT_ISO88025FIBER", - "IFT_ISO88026", - "IFT_ISUP", - "IFT_L2VLAN", - "IFT_L3IPVLAN", - "IFT_L3IPXVLAN", - "IFT_LAPB", - "IFT_LAPD", - "IFT_LAPF", - "IFT_LINEGROUP", - "IFT_LOCALTALK", - "IFT_LOOP", - "IFT_MEDIAMAILOVERIP", - "IFT_MFSIGLINK", - "IFT_MIOX25", - "IFT_MODEM", - "IFT_MPC", - "IFT_MPLS", - "IFT_MPLSTUNNEL", - "IFT_MSDSL", - "IFT_MVL", - "IFT_MYRINET", - "IFT_NFAS", - "IFT_NSIP", - "IFT_OPTICALCHANNEL", - "IFT_OPTICALTRANSPORT", - "IFT_OTHER", - "IFT_P10", - "IFT_P80", - "IFT_PARA", - "IFT_PDP", - "IFT_PFLOG", - "IFT_PFLOW", - "IFT_PFSYNC", - "IFT_PLC", - "IFT_PON155", - "IFT_PON622", - "IFT_POS", - "IFT_PPP", - "IFT_PPPMULTILINKBUNDLE", - "IFT_PROPATM", - "IFT_PROPBWAP2MP", - "IFT_PROPCNLS", - "IFT_PROPDOCSWIRELESSDOWNSTREAM", - "IFT_PROPDOCSWIRELESSMACLAYER", - "IFT_PROPDOCSWIRELESSUPSTREAM", - "IFT_PROPMUX", - "IFT_PROPVIRTUAL", - "IFT_PROPWIRELESSP2P", - "IFT_PTPSERIAL", - "IFT_PVC", - "IFT_Q2931", - "IFT_QLLC", - "IFT_RADIOMAC", - "IFT_RADSL", - "IFT_REACHDSL", - "IFT_RFC1483", - "IFT_RS232", - "IFT_RSRB", - "IFT_SDLC", - "IFT_SDSL", - "IFT_SHDSL", - 
"IFT_SIP", - "IFT_SIPSIG", - "IFT_SIPTG", - "IFT_SLIP", - "IFT_SMDSDXI", - "IFT_SMDSICIP", - "IFT_SONET", - "IFT_SONETOVERHEADCHANNEL", - "IFT_SONETPATH", - "IFT_SONETVT", - "IFT_SRP", - "IFT_SS7SIGLINK", - "IFT_STACKTOSTACK", - "IFT_STARLAN", - "IFT_STF", - "IFT_T1", - "IFT_TDLC", - "IFT_TELINK", - "IFT_TERMPAD", - "IFT_TR008", - "IFT_TRANSPHDLC", - "IFT_TUNNEL", - "IFT_ULTRA", - "IFT_USB", - "IFT_V11", - "IFT_V35", - "IFT_V36", - "IFT_V37", - "IFT_VDSL", - "IFT_VIRTUALIPADDRESS", - "IFT_VIRTUALTG", - "IFT_VOICEDID", - "IFT_VOICEEM", - "IFT_VOICEEMFGD", - "IFT_VOICEENCAP", - "IFT_VOICEFGDEANA", - "IFT_VOICEFXO", - "IFT_VOICEFXS", - "IFT_VOICEOVERATM", - "IFT_VOICEOVERCABLE", - "IFT_VOICEOVERFRAMERELAY", - "IFT_VOICEOVERIP", - "IFT_X213", - "IFT_X25", - "IFT_X25DDN", - "IFT_X25HUNTGROUP", - "IFT_X25MLP", - "IFT_X25PLE", - "IFT_XETHER", - "IGNBRK", - "IGNCR", - "IGNORE", - "IGNPAR", - "IMAXBEL", - "INFINITE", - "INLCR", - "INPCK", - "INVALID_FILE_ATTRIBUTES", - "IN_ACCESS", - "IN_ALL_EVENTS", - "IN_ATTRIB", - "IN_CLASSA_HOST", - "IN_CLASSA_MAX", - "IN_CLASSA_NET", - "IN_CLASSA_NSHIFT", - "IN_CLASSB_HOST", - "IN_CLASSB_MAX", - "IN_CLASSB_NET", - "IN_CLASSB_NSHIFT", - "IN_CLASSC_HOST", - "IN_CLASSC_NET", - "IN_CLASSC_NSHIFT", - "IN_CLASSD_HOST", - "IN_CLASSD_NET", - "IN_CLASSD_NSHIFT", - "IN_CLOEXEC", - "IN_CLOSE", - "IN_CLOSE_NOWRITE", - "IN_CLOSE_WRITE", - "IN_CREATE", - "IN_DELETE", - "IN_DELETE_SELF", - "IN_DONT_FOLLOW", - "IN_EXCL_UNLINK", - "IN_IGNORED", - "IN_ISDIR", - "IN_LINKLOCALNETNUM", - "IN_LOOPBACKNET", - "IN_MASK_ADD", - "IN_MODIFY", - "IN_MOVE", - "IN_MOVED_FROM", - "IN_MOVED_TO", - "IN_MOVE_SELF", - "IN_NONBLOCK", - "IN_ONESHOT", - "IN_ONLYDIR", - "IN_OPEN", - "IN_Q_OVERFLOW", - "IN_RFC3021_HOST", - "IN_RFC3021_MASK", - "IN_RFC3021_NET", - "IN_RFC3021_NSHIFT", - "IN_UNMOUNT", - "IOC_IN", - "IOC_INOUT", - "IOC_OUT", - "IOC_VENDOR", - "IOC_WS2", - "IO_REPARSE_TAG_SYMLINK", - "IPMreq", - "IPMreqn", - "IPPROTO_3PC", - "IPPROTO_ADFS", - "IPPROTO_AH", - "IPPROTO_AHIP", - "IPPROTO_APES", - "IPPROTO_ARGUS", - "IPPROTO_AX25", - "IPPROTO_BHA", - "IPPROTO_BLT", - "IPPROTO_BRSATMON", - "IPPROTO_CARP", - "IPPROTO_CFTP", - "IPPROTO_CHAOS", - "IPPROTO_CMTP", - "IPPROTO_COMP", - "IPPROTO_CPHB", - "IPPROTO_CPNX", - "IPPROTO_DCCP", - "IPPROTO_DDP", - "IPPROTO_DGP", - "IPPROTO_DIVERT", - "IPPROTO_DIVERT_INIT", - "IPPROTO_DIVERT_RESP", - "IPPROTO_DONE", - "IPPROTO_DSTOPTS", - "IPPROTO_EGP", - "IPPROTO_EMCON", - "IPPROTO_ENCAP", - "IPPROTO_EON", - "IPPROTO_ESP", - "IPPROTO_ETHERIP", - "IPPROTO_FRAGMENT", - "IPPROTO_GGP", - "IPPROTO_GMTP", - "IPPROTO_GRE", - "IPPROTO_HELLO", - "IPPROTO_HMP", - "IPPROTO_HOPOPTS", - "IPPROTO_ICMP", - "IPPROTO_ICMPV6", - "IPPROTO_IDP", - "IPPROTO_IDPR", - "IPPROTO_IDRP", - "IPPROTO_IGMP", - "IPPROTO_IGP", - "IPPROTO_IGRP", - "IPPROTO_IL", - "IPPROTO_INLSP", - "IPPROTO_INP", - "IPPROTO_IP", - "IPPROTO_IPCOMP", - "IPPROTO_IPCV", - "IPPROTO_IPEIP", - "IPPROTO_IPIP", - "IPPROTO_IPPC", - "IPPROTO_IPV4", - "IPPROTO_IPV6", - "IPPROTO_IPV6_ICMP", - "IPPROTO_IRTP", - "IPPROTO_KRYPTOLAN", - "IPPROTO_LARP", - "IPPROTO_LEAF1", - "IPPROTO_LEAF2", - "IPPROTO_MAX", - "IPPROTO_MAXID", - "IPPROTO_MEAS", - "IPPROTO_MH", - "IPPROTO_MHRP", - "IPPROTO_MICP", - "IPPROTO_MOBILE", - "IPPROTO_MPLS", - "IPPROTO_MTP", - "IPPROTO_MUX", - "IPPROTO_ND", - "IPPROTO_NHRP", - "IPPROTO_NONE", - "IPPROTO_NSP", - "IPPROTO_NVPII", - "IPPROTO_OLD_DIVERT", - "IPPROTO_OSPFIGP", - "IPPROTO_PFSYNC", - "IPPROTO_PGM", - "IPPROTO_PIGP", - "IPPROTO_PIM", - "IPPROTO_PRM", - "IPPROTO_PUP", - "IPPROTO_PVP", - 
"IPPROTO_RAW", - "IPPROTO_RCCMON", - "IPPROTO_RDP", - "IPPROTO_ROUTING", - "IPPROTO_RSVP", - "IPPROTO_RVD", - "IPPROTO_SATEXPAK", - "IPPROTO_SATMON", - "IPPROTO_SCCSP", - "IPPROTO_SCTP", - "IPPROTO_SDRP", - "IPPROTO_SEND", - "IPPROTO_SEP", - "IPPROTO_SKIP", - "IPPROTO_SPACER", - "IPPROTO_SRPC", - "IPPROTO_ST", - "IPPROTO_SVMTP", - "IPPROTO_SWIPE", - "IPPROTO_TCF", - "IPPROTO_TCP", - "IPPROTO_TLSP", - "IPPROTO_TP", - "IPPROTO_TPXX", - "IPPROTO_TRUNK1", - "IPPROTO_TRUNK2", - "IPPROTO_TTP", - "IPPROTO_UDP", - "IPPROTO_UDPLITE", - "IPPROTO_VINES", - "IPPROTO_VISA", - "IPPROTO_VMTP", - "IPPROTO_VRRP", - "IPPROTO_WBEXPAK", - "IPPROTO_WBMON", - "IPPROTO_WSN", - "IPPROTO_XNET", - "IPPROTO_XTP", - "IPV6_2292DSTOPTS", - "IPV6_2292HOPLIMIT", - "IPV6_2292HOPOPTS", - "IPV6_2292NEXTHOP", - "IPV6_2292PKTINFO", - "IPV6_2292PKTOPTIONS", - "IPV6_2292RTHDR", - "IPV6_ADDRFORM", - "IPV6_ADD_MEMBERSHIP", - "IPV6_AUTHHDR", - "IPV6_AUTH_LEVEL", - "IPV6_AUTOFLOWLABEL", - "IPV6_BINDANY", - "IPV6_BINDV6ONLY", - "IPV6_BOUND_IF", - "IPV6_CHECKSUM", - "IPV6_DEFAULT_MULTICAST_HOPS", - "IPV6_DEFAULT_MULTICAST_LOOP", - "IPV6_DEFHLIM", - "IPV6_DONTFRAG", - "IPV6_DROP_MEMBERSHIP", - "IPV6_DSTOPTS", - "IPV6_ESP_NETWORK_LEVEL", - "IPV6_ESP_TRANS_LEVEL", - "IPV6_FAITH", - "IPV6_FLOWINFO_MASK", - "IPV6_FLOWLABEL_MASK", - "IPV6_FRAGTTL", - "IPV6_FW_ADD", - "IPV6_FW_DEL", - "IPV6_FW_FLUSH", - "IPV6_FW_GET", - "IPV6_FW_ZERO", - "IPV6_HLIMDEC", - "IPV6_HOPLIMIT", - "IPV6_HOPOPTS", - "IPV6_IPCOMP_LEVEL", - "IPV6_IPSEC_POLICY", - "IPV6_JOIN_ANYCAST", - "IPV6_JOIN_GROUP", - "IPV6_LEAVE_ANYCAST", - "IPV6_LEAVE_GROUP", - "IPV6_MAXHLIM", - "IPV6_MAXOPTHDR", - "IPV6_MAXPACKET", - "IPV6_MAX_GROUP_SRC_FILTER", - "IPV6_MAX_MEMBERSHIPS", - "IPV6_MAX_SOCK_SRC_FILTER", - "IPV6_MIN_MEMBERSHIPS", - "IPV6_MMTU", - "IPV6_MSFILTER", - "IPV6_MTU", - "IPV6_MTU_DISCOVER", - "IPV6_MULTICAST_HOPS", - "IPV6_MULTICAST_IF", - "IPV6_MULTICAST_LOOP", - "IPV6_NEXTHOP", - "IPV6_OPTIONS", - "IPV6_PATHMTU", - "IPV6_PIPEX", - "IPV6_PKTINFO", - "IPV6_PMTUDISC_DO", - "IPV6_PMTUDISC_DONT", - "IPV6_PMTUDISC_PROBE", - "IPV6_PMTUDISC_WANT", - "IPV6_PORTRANGE", - "IPV6_PORTRANGE_DEFAULT", - "IPV6_PORTRANGE_HIGH", - "IPV6_PORTRANGE_LOW", - "IPV6_PREFER_TEMPADDR", - "IPV6_RECVDSTOPTS", - "IPV6_RECVDSTPORT", - "IPV6_RECVERR", - "IPV6_RECVHOPLIMIT", - "IPV6_RECVHOPOPTS", - "IPV6_RECVPATHMTU", - "IPV6_RECVPKTINFO", - "IPV6_RECVRTHDR", - "IPV6_RECVTCLASS", - "IPV6_ROUTER_ALERT", - "IPV6_RTABLE", - "IPV6_RTHDR", - "IPV6_RTHDRDSTOPTS", - "IPV6_RTHDR_LOOSE", - "IPV6_RTHDR_STRICT", - "IPV6_RTHDR_TYPE_0", - "IPV6_RXDSTOPTS", - "IPV6_RXHOPOPTS", - "IPV6_SOCKOPT_RESERVED1", - "IPV6_TCLASS", - "IPV6_UNICAST_HOPS", - "IPV6_USE_MIN_MTU", - "IPV6_V6ONLY", - "IPV6_VERSION", - "IPV6_VERSION_MASK", - "IPV6_XFRM_POLICY", - "IP_ADD_MEMBERSHIP", - "IP_ADD_SOURCE_MEMBERSHIP", - "IP_AUTH_LEVEL", - "IP_BINDANY", - "IP_BLOCK_SOURCE", - "IP_BOUND_IF", - "IP_DEFAULT_MULTICAST_LOOP", - "IP_DEFAULT_MULTICAST_TTL", - "IP_DF", - "IP_DIVERTFL", - "IP_DONTFRAG", - "IP_DROP_MEMBERSHIP", - "IP_DROP_SOURCE_MEMBERSHIP", - "IP_DUMMYNET3", - "IP_DUMMYNET_CONFIGURE", - "IP_DUMMYNET_DEL", - "IP_DUMMYNET_FLUSH", - "IP_DUMMYNET_GET", - "IP_EF", - "IP_ERRORMTU", - "IP_ESP_NETWORK_LEVEL", - "IP_ESP_TRANS_LEVEL", - "IP_FAITH", - "IP_FREEBIND", - "IP_FW3", - "IP_FW_ADD", - "IP_FW_DEL", - "IP_FW_FLUSH", - "IP_FW_GET", - "IP_FW_NAT_CFG", - "IP_FW_NAT_DEL", - "IP_FW_NAT_GET_CONFIG", - "IP_FW_NAT_GET_LOG", - "IP_FW_RESETLOG", - "IP_FW_TABLE_ADD", - "IP_FW_TABLE_DEL", - "IP_FW_TABLE_FLUSH", - "IP_FW_TABLE_GETSIZE", - 
"IP_FW_TABLE_LIST", - "IP_FW_ZERO", - "IP_HDRINCL", - "IP_IPCOMP_LEVEL", - "IP_IPSECFLOWINFO", - "IP_IPSEC_LOCAL_AUTH", - "IP_IPSEC_LOCAL_CRED", - "IP_IPSEC_LOCAL_ID", - "IP_IPSEC_POLICY", - "IP_IPSEC_REMOTE_AUTH", - "IP_IPSEC_REMOTE_CRED", - "IP_IPSEC_REMOTE_ID", - "IP_MAXPACKET", - "IP_MAX_GROUP_SRC_FILTER", - "IP_MAX_MEMBERSHIPS", - "IP_MAX_SOCK_MUTE_FILTER", - "IP_MAX_SOCK_SRC_FILTER", - "IP_MAX_SOURCE_FILTER", - "IP_MF", - "IP_MINFRAGSIZE", - "IP_MINTTL", - "IP_MIN_MEMBERSHIPS", - "IP_MSFILTER", - "IP_MSS", - "IP_MTU", - "IP_MTU_DISCOVER", - "IP_MULTICAST_IF", - "IP_MULTICAST_IFINDEX", - "IP_MULTICAST_LOOP", - "IP_MULTICAST_TTL", - "IP_MULTICAST_VIF", - "IP_NAT__XXX", - "IP_OFFMASK", - "IP_OLD_FW_ADD", - "IP_OLD_FW_DEL", - "IP_OLD_FW_FLUSH", - "IP_OLD_FW_GET", - "IP_OLD_FW_RESETLOG", - "IP_OLD_FW_ZERO", - "IP_ONESBCAST", - "IP_OPTIONS", - "IP_ORIGDSTADDR", - "IP_PASSSEC", - "IP_PIPEX", - "IP_PKTINFO", - "IP_PKTOPTIONS", - "IP_PMTUDISC", - "IP_PMTUDISC_DO", - "IP_PMTUDISC_DONT", - "IP_PMTUDISC_PROBE", - "IP_PMTUDISC_WANT", - "IP_PORTRANGE", - "IP_PORTRANGE_DEFAULT", - "IP_PORTRANGE_HIGH", - "IP_PORTRANGE_LOW", - "IP_RECVDSTADDR", - "IP_RECVDSTPORT", - "IP_RECVERR", - "IP_RECVIF", - "IP_RECVOPTS", - "IP_RECVORIGDSTADDR", - "IP_RECVPKTINFO", - "IP_RECVRETOPTS", - "IP_RECVRTABLE", - "IP_RECVTOS", - "IP_RECVTTL", - "IP_RETOPTS", - "IP_RF", - "IP_ROUTER_ALERT", - "IP_RSVP_OFF", - "IP_RSVP_ON", - "IP_RSVP_VIF_OFF", - "IP_RSVP_VIF_ON", - "IP_RTABLE", - "IP_SENDSRCADDR", - "IP_STRIPHDR", - "IP_TOS", - "IP_TRAFFIC_MGT_BACKGROUND", - "IP_TRANSPARENT", - "IP_TTL", - "IP_UNBLOCK_SOURCE", - "IP_XFRM_POLICY", - "IPv6MTUInfo", - "IPv6Mreq", - "ISIG", - "ISTRIP", - "IUCLC", - "IUTF8", - "IXANY", - "IXOFF", - "IXON", - "IfAddrmsg", - "IfAnnounceMsghdr", - "IfData", - "IfInfomsg", - "IfMsghdr", - "IfaMsghdr", - "IfmaMsghdr", - "IfmaMsghdr2", - "ImplementsGetwd", - "Inet4Pktinfo", - "Inet6Pktinfo", - "InotifyAddWatch", - "InotifyEvent", - "InotifyInit", - "InotifyInit1", - "InotifyRmWatch", - "InterfaceAddrMessage", - "InterfaceAnnounceMessage", - "InterfaceInfo", - "InterfaceMessage", - "InterfaceMulticastAddrMessage", - "InvalidHandle", - "Ioperm", - "Iopl", - "Iovec", - "IpAdapterInfo", - "IpAddrString", - "IpAddressString", - "IpMaskString", - "Issetugid", - "KEY_ALL_ACCESS", - "KEY_CREATE_LINK", - "KEY_CREATE_SUB_KEY", - "KEY_ENUMERATE_SUB_KEYS", - "KEY_EXECUTE", - "KEY_NOTIFY", - "KEY_QUERY_VALUE", - "KEY_READ", - "KEY_SET_VALUE", - "KEY_WOW64_32KEY", - "KEY_WOW64_64KEY", - "KEY_WRITE", - "Kevent", - "Kevent_t", - "Kill", - "Klogctl", - "Kqueue", - "LANG_ENGLISH", - "LAYERED_PROTOCOL", - "LCNT_OVERLOAD_FLUSH", - "LINUX_REBOOT_CMD_CAD_OFF", - "LINUX_REBOOT_CMD_CAD_ON", - "LINUX_REBOOT_CMD_HALT", - "LINUX_REBOOT_CMD_KEXEC", - "LINUX_REBOOT_CMD_POWER_OFF", - "LINUX_REBOOT_CMD_RESTART", - "LINUX_REBOOT_CMD_RESTART2", - "LINUX_REBOOT_CMD_SW_SUSPEND", - "LINUX_REBOOT_MAGIC1", - "LINUX_REBOOT_MAGIC2", - "LOCK_EX", - "LOCK_NB", - "LOCK_SH", - "LOCK_UN", - "LazyDLL", - "LazyProc", - "Lchown", - "Linger", - "Link", - "Listen", - "Listxattr", - "LoadCancelIoEx", - "LoadConnectEx", - "LoadCreateSymbolicLink", - "LoadDLL", - "LoadGetAddrInfo", - "LoadLibrary", - "LoadSetFileCompletionNotificationModes", - "LocalFree", - "Log2phys_t", - "LookupAccountName", - "LookupAccountSid", - "LookupSID", - "LsfJump", - "LsfSocket", - "LsfStmt", - "Lstat", - "MADV_AUTOSYNC", - "MADV_CAN_REUSE", - "MADV_CORE", - "MADV_DOFORK", - "MADV_DONTFORK", - "MADV_DONTNEED", - "MADV_FREE", - "MADV_FREE_REUSABLE", - "MADV_FREE_REUSE", - 
"MADV_HUGEPAGE", - "MADV_HWPOISON", - "MADV_MERGEABLE", - "MADV_NOCORE", - "MADV_NOHUGEPAGE", - "MADV_NORMAL", - "MADV_NOSYNC", - "MADV_PROTECT", - "MADV_RANDOM", - "MADV_REMOVE", - "MADV_SEQUENTIAL", - "MADV_SPACEAVAIL", - "MADV_UNMERGEABLE", - "MADV_WILLNEED", - "MADV_ZERO_WIRED_PAGES", - "MAP_32BIT", - "MAP_ALIGNED_SUPER", - "MAP_ALIGNMENT_16MB", - "MAP_ALIGNMENT_1TB", - "MAP_ALIGNMENT_256TB", - "MAP_ALIGNMENT_4GB", - "MAP_ALIGNMENT_64KB", - "MAP_ALIGNMENT_64PB", - "MAP_ALIGNMENT_MASK", - "MAP_ALIGNMENT_SHIFT", - "MAP_ANON", - "MAP_ANONYMOUS", - "MAP_COPY", - "MAP_DENYWRITE", - "MAP_EXECUTABLE", - "MAP_FILE", - "MAP_FIXED", - "MAP_FLAGMASK", - "MAP_GROWSDOWN", - "MAP_HASSEMAPHORE", - "MAP_HUGETLB", - "MAP_INHERIT", - "MAP_INHERIT_COPY", - "MAP_INHERIT_DEFAULT", - "MAP_INHERIT_DONATE_COPY", - "MAP_INHERIT_NONE", - "MAP_INHERIT_SHARE", - "MAP_JIT", - "MAP_LOCKED", - "MAP_NOCACHE", - "MAP_NOCORE", - "MAP_NOEXTEND", - "MAP_NONBLOCK", - "MAP_NORESERVE", - "MAP_NOSYNC", - "MAP_POPULATE", - "MAP_PREFAULT_READ", - "MAP_PRIVATE", - "MAP_RENAME", - "MAP_RESERVED0080", - "MAP_RESERVED0100", - "MAP_SHARED", - "MAP_STACK", - "MAP_TRYFIXED", - "MAP_TYPE", - "MAP_WIRED", - "MAXIMUM_REPARSE_DATA_BUFFER_SIZE", - "MAXLEN_IFDESCR", - "MAXLEN_PHYSADDR", - "MAX_ADAPTER_ADDRESS_LENGTH", - "MAX_ADAPTER_DESCRIPTION_LENGTH", - "MAX_ADAPTER_NAME_LENGTH", - "MAX_COMPUTERNAME_LENGTH", - "MAX_INTERFACE_NAME_LEN", - "MAX_LONG_PATH", - "MAX_PATH", - "MAX_PROTOCOL_CHAIN", - "MCL_CURRENT", - "MCL_FUTURE", - "MNT_DETACH", - "MNT_EXPIRE", - "MNT_FORCE", - "MSG_BCAST", - "MSG_CMSG_CLOEXEC", - "MSG_COMPAT", - "MSG_CONFIRM", - "MSG_CONTROLMBUF", - "MSG_CTRUNC", - "MSG_DONTROUTE", - "MSG_DONTWAIT", - "MSG_EOF", - "MSG_EOR", - "MSG_ERRQUEUE", - "MSG_FASTOPEN", - "MSG_FIN", - "MSG_FLUSH", - "MSG_HAVEMORE", - "MSG_HOLD", - "MSG_IOVUSRSPACE", - "MSG_LENUSRSPACE", - "MSG_MCAST", - "MSG_MORE", - "MSG_NAMEMBUF", - "MSG_NBIO", - "MSG_NEEDSA", - "MSG_NOSIGNAL", - "MSG_NOTIFICATION", - "MSG_OOB", - "MSG_PEEK", - "MSG_PROXY", - "MSG_RCVMORE", - "MSG_RST", - "MSG_SEND", - "MSG_SYN", - "MSG_TRUNC", - "MSG_TRYHARD", - "MSG_USERFLAGS", - "MSG_WAITALL", - "MSG_WAITFORONE", - "MSG_WAITSTREAM", - "MS_ACTIVE", - "MS_ASYNC", - "MS_BIND", - "MS_DEACTIVATE", - "MS_DIRSYNC", - "MS_INVALIDATE", - "MS_I_VERSION", - "MS_KERNMOUNT", - "MS_KILLPAGES", - "MS_MANDLOCK", - "MS_MGC_MSK", - "MS_MGC_VAL", - "MS_MOVE", - "MS_NOATIME", - "MS_NODEV", - "MS_NODIRATIME", - "MS_NOEXEC", - "MS_NOSUID", - "MS_NOUSER", - "MS_POSIXACL", - "MS_PRIVATE", - "MS_RDONLY", - "MS_REC", - "MS_RELATIME", - "MS_REMOUNT", - "MS_RMT_MASK", - "MS_SHARED", - "MS_SILENT", - "MS_SLAVE", - "MS_STRICTATIME", - "MS_SYNC", - "MS_SYNCHRONOUS", - "MS_UNBINDABLE", - "Madvise", - "MapViewOfFile", - "MaxTokenInfoClass", - "Mclpool", - "MibIfRow", - "Mkdir", - "Mkdirat", - "Mkfifo", - "Mknod", - "Mknodat", - "Mlock", - "Mlockall", - "Mmap", - "Mount", - "MoveFile", - "Mprotect", - "Msghdr", - "Munlock", - "Munlockall", - "Munmap", - "MustLoadDLL", - "NAME_MAX", - "NETLINK_ADD_MEMBERSHIP", - "NETLINK_AUDIT", - "NETLINK_BROADCAST_ERROR", - "NETLINK_CONNECTOR", - "NETLINK_DNRTMSG", - "NETLINK_DROP_MEMBERSHIP", - "NETLINK_ECRYPTFS", - "NETLINK_FIB_LOOKUP", - "NETLINK_FIREWALL", - "NETLINK_GENERIC", - "NETLINK_INET_DIAG", - "NETLINK_IP6_FW", - "NETLINK_ISCSI", - "NETLINK_KOBJECT_UEVENT", - "NETLINK_NETFILTER", - "NETLINK_NFLOG", - "NETLINK_NO_ENOBUFS", - "NETLINK_PKTINFO", - "NETLINK_RDMA", - "NETLINK_ROUTE", - "NETLINK_SCSITRANSPORT", - "NETLINK_SELINUX", - "NETLINK_UNUSED", - "NETLINK_USERSOCK", 
- "NETLINK_XFRM", - "NET_RT_DUMP", - "NET_RT_DUMP2", - "NET_RT_FLAGS", - "NET_RT_IFLIST", - "NET_RT_IFLIST2", - "NET_RT_IFLISTL", - "NET_RT_IFMALIST", - "NET_RT_MAXID", - "NET_RT_OIFLIST", - "NET_RT_OOIFLIST", - "NET_RT_STAT", - "NET_RT_STATS", - "NET_RT_TABLE", - "NET_RT_TRASH", - "NLA_ALIGNTO", - "NLA_F_NESTED", - "NLA_F_NET_BYTEORDER", - "NLA_HDRLEN", - "NLMSG_ALIGNTO", - "NLMSG_DONE", - "NLMSG_ERROR", - "NLMSG_HDRLEN", - "NLMSG_MIN_TYPE", - "NLMSG_NOOP", - "NLMSG_OVERRUN", - "NLM_F_ACK", - "NLM_F_APPEND", - "NLM_F_ATOMIC", - "NLM_F_CREATE", - "NLM_F_DUMP", - "NLM_F_ECHO", - "NLM_F_EXCL", - "NLM_F_MATCH", - "NLM_F_MULTI", - "NLM_F_REPLACE", - "NLM_F_REQUEST", - "NLM_F_ROOT", - "NOFLSH", - "NOTE_ABSOLUTE", - "NOTE_ATTRIB", - "NOTE_BACKGROUND", - "NOTE_CHILD", - "NOTE_CRITICAL", - "NOTE_DELETE", - "NOTE_EOF", - "NOTE_EXEC", - "NOTE_EXIT", - "NOTE_EXITSTATUS", - "NOTE_EXIT_CSERROR", - "NOTE_EXIT_DECRYPTFAIL", - "NOTE_EXIT_DETAIL", - "NOTE_EXIT_DETAIL_MASK", - "NOTE_EXIT_MEMORY", - "NOTE_EXIT_REPARENTED", - "NOTE_EXTEND", - "NOTE_FFAND", - "NOTE_FFCOPY", - "NOTE_FFCTRLMASK", - "NOTE_FFLAGSMASK", - "NOTE_FFNOP", - "NOTE_FFOR", - "NOTE_FORK", - "NOTE_LEEWAY", - "NOTE_LINK", - "NOTE_LOWAT", - "NOTE_NONE", - "NOTE_NSECONDS", - "NOTE_PCTRLMASK", - "NOTE_PDATAMASK", - "NOTE_REAP", - "NOTE_RENAME", - "NOTE_RESOURCEEND", - "NOTE_REVOKE", - "NOTE_SECONDS", - "NOTE_SIGNAL", - "NOTE_TRACK", - "NOTE_TRACKERR", - "NOTE_TRIGGER", - "NOTE_TRUNCATE", - "NOTE_USECONDS", - "NOTE_VM_ERROR", - "NOTE_VM_PRESSURE", - "NOTE_VM_PRESSURE_SUDDEN_TERMINATE", - "NOTE_VM_PRESSURE_TERMINATE", - "NOTE_WRITE", - "NameCanonical", - "NameCanonicalEx", - "NameDisplay", - "NameDnsDomain", - "NameFullyQualifiedDN", - "NameSamCompatible", - "NameServicePrincipal", - "NameUniqueId", - "NameUnknown", - "NameUserPrincipal", - "Nanosleep", - "NetApiBufferFree", - "NetGetJoinInformation", - "NetSetupDomainName", - "NetSetupUnjoined", - "NetSetupUnknownStatus", - "NetSetupWorkgroupName", - "NetUserGetInfo", - "NetlinkMessage", - "NetlinkRIB", - "NetlinkRouteAttr", - "NetlinkRouteRequest", - "NewCallback", - "NewCallbackCDecl", - "NewLazyDLL", - "NlAttr", - "NlMsgerr", - "NlMsghdr", - "NsecToFiletime", - "NsecToTimespec", - "NsecToTimeval", - "Ntohs", - "OCRNL", - "OFDEL", - "OFILL", - "OFIOGETBMAP", - "OID_PKIX_KP_SERVER_AUTH", - "OID_SERVER_GATED_CRYPTO", - "OID_SGC_NETSCAPE", - "OLCUC", - "ONLCR", - "ONLRET", - "ONOCR", - "ONOEOT", - "OPEN_ALWAYS", - "OPEN_EXISTING", - "OPOST", - "O_ACCMODE", - "O_ALERT", - "O_ALT_IO", - "O_APPEND", - "O_ASYNC", - "O_CLOEXEC", - "O_CREAT", - "O_DIRECT", - "O_DIRECTORY", - "O_DP_GETRAWENCRYPTED", - "O_DSYNC", - "O_EVTONLY", - "O_EXCL", - "O_EXEC", - "O_EXLOCK", - "O_FSYNC", - "O_LARGEFILE", - "O_NDELAY", - "O_NOATIME", - "O_NOCTTY", - "O_NOFOLLOW", - "O_NONBLOCK", - "O_NOSIGPIPE", - "O_POPUP", - "O_RDONLY", - "O_RDWR", - "O_RSYNC", - "O_SHLOCK", - "O_SYMLINK", - "O_SYNC", - "O_TRUNC", - "O_TTY_INIT", - "O_WRONLY", - "Open", - "OpenCurrentProcessToken", - "OpenProcess", - "OpenProcessToken", - "Openat", - "Overlapped", - "PACKET_ADD_MEMBERSHIP", - "PACKET_BROADCAST", - "PACKET_DROP_MEMBERSHIP", - "PACKET_FASTROUTE", - "PACKET_HOST", - "PACKET_LOOPBACK", - "PACKET_MR_ALLMULTI", - "PACKET_MR_MULTICAST", - "PACKET_MR_PROMISC", - "PACKET_MULTICAST", - "PACKET_OTHERHOST", - "PACKET_OUTGOING", - "PACKET_RECV_OUTPUT", - "PACKET_RX_RING", - "PACKET_STATISTICS", - "PAGE_EXECUTE_READ", - "PAGE_EXECUTE_READWRITE", - "PAGE_EXECUTE_WRITECOPY", - "PAGE_READONLY", - "PAGE_READWRITE", - "PAGE_WRITECOPY", - "PARENB", 
- "PARMRK", - "PARODD", - "PENDIN", - "PFL_HIDDEN", - "PFL_MATCHES_PROTOCOL_ZERO", - "PFL_MULTIPLE_PROTO_ENTRIES", - "PFL_NETWORKDIRECT_PROVIDER", - "PFL_RECOMMENDED_PROTO_ENTRY", - "PF_FLUSH", - "PKCS_7_ASN_ENCODING", - "PMC5_PIPELINE_FLUSH", - "PRIO_PGRP", - "PRIO_PROCESS", - "PRIO_USER", - "PRI_IOFLUSH", - "PROCESS_QUERY_INFORMATION", - "PROCESS_TERMINATE", - "PROT_EXEC", - "PROT_GROWSDOWN", - "PROT_GROWSUP", - "PROT_NONE", - "PROT_READ", - "PROT_WRITE", - "PROV_DH_SCHANNEL", - "PROV_DSS", - "PROV_DSS_DH", - "PROV_EC_ECDSA_FULL", - "PROV_EC_ECDSA_SIG", - "PROV_EC_ECNRA_FULL", - "PROV_EC_ECNRA_SIG", - "PROV_FORTEZZA", - "PROV_INTEL_SEC", - "PROV_MS_EXCHANGE", - "PROV_REPLACE_OWF", - "PROV_RNG", - "PROV_RSA_AES", - "PROV_RSA_FULL", - "PROV_RSA_SCHANNEL", - "PROV_RSA_SIG", - "PROV_SPYRUS_LYNKS", - "PROV_SSL", - "PR_CAPBSET_DROP", - "PR_CAPBSET_READ", - "PR_CLEAR_SECCOMP_FILTER", - "PR_ENDIAN_BIG", - "PR_ENDIAN_LITTLE", - "PR_ENDIAN_PPC_LITTLE", - "PR_FPEMU_NOPRINT", - "PR_FPEMU_SIGFPE", - "PR_FP_EXC_ASYNC", - "PR_FP_EXC_DISABLED", - "PR_FP_EXC_DIV", - "PR_FP_EXC_INV", - "PR_FP_EXC_NONRECOV", - "PR_FP_EXC_OVF", - "PR_FP_EXC_PRECISE", - "PR_FP_EXC_RES", - "PR_FP_EXC_SW_ENABLE", - "PR_FP_EXC_UND", - "PR_GET_DUMPABLE", - "PR_GET_ENDIAN", - "PR_GET_FPEMU", - "PR_GET_FPEXC", - "PR_GET_KEEPCAPS", - "PR_GET_NAME", - "PR_GET_PDEATHSIG", - "PR_GET_SECCOMP", - "PR_GET_SECCOMP_FILTER", - "PR_GET_SECUREBITS", - "PR_GET_TIMERSLACK", - "PR_GET_TIMING", - "PR_GET_TSC", - "PR_GET_UNALIGN", - "PR_MCE_KILL", - "PR_MCE_KILL_CLEAR", - "PR_MCE_KILL_DEFAULT", - "PR_MCE_KILL_EARLY", - "PR_MCE_KILL_GET", - "PR_MCE_KILL_LATE", - "PR_MCE_KILL_SET", - "PR_SECCOMP_FILTER_EVENT", - "PR_SECCOMP_FILTER_SYSCALL", - "PR_SET_DUMPABLE", - "PR_SET_ENDIAN", - "PR_SET_FPEMU", - "PR_SET_FPEXC", - "PR_SET_KEEPCAPS", - "PR_SET_NAME", - "PR_SET_PDEATHSIG", - "PR_SET_PTRACER", - "PR_SET_SECCOMP", - "PR_SET_SECCOMP_FILTER", - "PR_SET_SECUREBITS", - "PR_SET_TIMERSLACK", - "PR_SET_TIMING", - "PR_SET_TSC", - "PR_SET_UNALIGN", - "PR_TASK_PERF_EVENTS_DISABLE", - "PR_TASK_PERF_EVENTS_ENABLE", - "PR_TIMING_STATISTICAL", - "PR_TIMING_TIMESTAMP", - "PR_TSC_ENABLE", - "PR_TSC_SIGSEGV", - "PR_UNALIGN_NOPRINT", - "PR_UNALIGN_SIGBUS", - "PTRACE_ARCH_PRCTL", - "PTRACE_ATTACH", - "PTRACE_CONT", - "PTRACE_DETACH", - "PTRACE_EVENT_CLONE", - "PTRACE_EVENT_EXEC", - "PTRACE_EVENT_EXIT", - "PTRACE_EVENT_FORK", - "PTRACE_EVENT_VFORK", - "PTRACE_EVENT_VFORK_DONE", - "PTRACE_GETCRUNCHREGS", - "PTRACE_GETEVENTMSG", - "PTRACE_GETFPREGS", - "PTRACE_GETFPXREGS", - "PTRACE_GETHBPREGS", - "PTRACE_GETREGS", - "PTRACE_GETREGSET", - "PTRACE_GETSIGINFO", - "PTRACE_GETVFPREGS", - "PTRACE_GETWMMXREGS", - "PTRACE_GET_THREAD_AREA", - "PTRACE_KILL", - "PTRACE_OLDSETOPTIONS", - "PTRACE_O_MASK", - "PTRACE_O_TRACECLONE", - "PTRACE_O_TRACEEXEC", - "PTRACE_O_TRACEEXIT", - "PTRACE_O_TRACEFORK", - "PTRACE_O_TRACESYSGOOD", - "PTRACE_O_TRACEVFORK", - "PTRACE_O_TRACEVFORKDONE", - "PTRACE_PEEKDATA", - "PTRACE_PEEKTEXT", - "PTRACE_PEEKUSR", - "PTRACE_POKEDATA", - "PTRACE_POKETEXT", - "PTRACE_POKEUSR", - "PTRACE_SETCRUNCHREGS", - "PTRACE_SETFPREGS", - "PTRACE_SETFPXREGS", - "PTRACE_SETHBPREGS", - "PTRACE_SETOPTIONS", - "PTRACE_SETREGS", - "PTRACE_SETREGSET", - "PTRACE_SETSIGINFO", - "PTRACE_SETVFPREGS", - "PTRACE_SETWMMXREGS", - "PTRACE_SET_SYSCALL", - "PTRACE_SET_THREAD_AREA", - "PTRACE_SINGLEBLOCK", - "PTRACE_SINGLESTEP", - "PTRACE_SYSCALL", - "PTRACE_SYSEMU", - "PTRACE_SYSEMU_SINGLESTEP", - "PTRACE_TRACEME", - "PT_ATTACH", - "PT_ATTACHEXC", - "PT_CONTINUE", - "PT_DATA_ADDR", - 
"PT_DENY_ATTACH", - "PT_DETACH", - "PT_FIRSTMACH", - "PT_FORCEQUOTA", - "PT_KILL", - "PT_MASK", - "PT_READ_D", - "PT_READ_I", - "PT_READ_U", - "PT_SIGEXC", - "PT_STEP", - "PT_TEXT_ADDR", - "PT_TEXT_END_ADDR", - "PT_THUPDATE", - "PT_TRACE_ME", - "PT_WRITE_D", - "PT_WRITE_I", - "PT_WRITE_U", - "ParseDirent", - "ParseNetlinkMessage", - "ParseNetlinkRouteAttr", - "ParseRoutingMessage", - "ParseRoutingSockaddr", - "ParseSocketControlMessage", - "ParseUnixCredentials", - "ParseUnixRights", - "PathMax", - "Pathconf", - "Pause", - "Pipe", - "Pipe2", - "PivotRoot", - "Pointer", - "PostQueuedCompletionStatus", - "Pread", - "Proc", - "ProcAttr", - "Process32First", - "Process32Next", - "ProcessEntry32", - "ProcessInformation", - "Protoent", - "PtraceAttach", - "PtraceCont", - "PtraceDetach", - "PtraceGetEventMsg", - "PtraceGetRegs", - "PtracePeekData", - "PtracePeekText", - "PtracePokeData", - "PtracePokeText", - "PtraceRegs", - "PtraceSetOptions", - "PtraceSetRegs", - "PtraceSingleStep", - "PtraceSyscall", - "Pwrite", - "REG_BINARY", - "REG_DWORD", - "REG_DWORD_BIG_ENDIAN", - "REG_DWORD_LITTLE_ENDIAN", - "REG_EXPAND_SZ", - "REG_FULL_RESOURCE_DESCRIPTOR", - "REG_LINK", - "REG_MULTI_SZ", - "REG_NONE", - "REG_QWORD", - "REG_QWORD_LITTLE_ENDIAN", - "REG_RESOURCE_LIST", - "REG_RESOURCE_REQUIREMENTS_LIST", - "REG_SZ", - "RLIMIT_AS", - "RLIMIT_CORE", - "RLIMIT_CPU", - "RLIMIT_CPU_USAGE_MONITOR", - "RLIMIT_DATA", - "RLIMIT_FSIZE", - "RLIMIT_NOFILE", - "RLIMIT_STACK", - "RLIM_INFINITY", - "RTAX_ADVMSS", - "RTAX_AUTHOR", - "RTAX_BRD", - "RTAX_CWND", - "RTAX_DST", - "RTAX_FEATURES", - "RTAX_FEATURE_ALLFRAG", - "RTAX_FEATURE_ECN", - "RTAX_FEATURE_SACK", - "RTAX_FEATURE_TIMESTAMP", - "RTAX_GATEWAY", - "RTAX_GENMASK", - "RTAX_HOPLIMIT", - "RTAX_IFA", - "RTAX_IFP", - "RTAX_INITCWND", - "RTAX_INITRWND", - "RTAX_LABEL", - "RTAX_LOCK", - "RTAX_MAX", - "RTAX_MTU", - "RTAX_NETMASK", - "RTAX_REORDERING", - "RTAX_RTO_MIN", - "RTAX_RTT", - "RTAX_RTTVAR", - "RTAX_SRC", - "RTAX_SRCMASK", - "RTAX_SSTHRESH", - "RTAX_TAG", - "RTAX_UNSPEC", - "RTAX_WINDOW", - "RTA_ALIGNTO", - "RTA_AUTHOR", - "RTA_BRD", - "RTA_CACHEINFO", - "RTA_DST", - "RTA_FLOW", - "RTA_GATEWAY", - "RTA_GENMASK", - "RTA_IFA", - "RTA_IFP", - "RTA_IIF", - "RTA_LABEL", - "RTA_MAX", - "RTA_METRICS", - "RTA_MULTIPATH", - "RTA_NETMASK", - "RTA_OIF", - "RTA_PREFSRC", - "RTA_PRIORITY", - "RTA_SRC", - "RTA_SRCMASK", - "RTA_TABLE", - "RTA_TAG", - "RTA_UNSPEC", - "RTCF_DIRECTSRC", - "RTCF_DOREDIRECT", - "RTCF_LOG", - "RTCF_MASQ", - "RTCF_NAT", - "RTCF_VALVE", - "RTF_ADDRCLASSMASK", - "RTF_ADDRCONF", - "RTF_ALLONLINK", - "RTF_ANNOUNCE", - "RTF_BLACKHOLE", - "RTF_BROADCAST", - "RTF_CACHE", - "RTF_CLONED", - "RTF_CLONING", - "RTF_CONDEMNED", - "RTF_DEFAULT", - "RTF_DELCLONE", - "RTF_DONE", - "RTF_DYNAMIC", - "RTF_FLOW", - "RTF_FMASK", - "RTF_GATEWAY", - "RTF_GWFLAG_COMPAT", - "RTF_HOST", - "RTF_IFREF", - "RTF_IFSCOPE", - "RTF_INTERFACE", - "RTF_IRTT", - "RTF_LINKRT", - "RTF_LLDATA", - "RTF_LLINFO", - "RTF_LOCAL", - "RTF_MASK", - "RTF_MODIFIED", - "RTF_MPATH", - "RTF_MPLS", - "RTF_MSS", - "RTF_MTU", - "RTF_MULTICAST", - "RTF_NAT", - "RTF_NOFORWARD", - "RTF_NONEXTHOP", - "RTF_NOPMTUDISC", - "RTF_PERMANENT_ARP", - "RTF_PINNED", - "RTF_POLICY", - "RTF_PRCLONING", - "RTF_PROTO1", - "RTF_PROTO2", - "RTF_PROTO3", - "RTF_PROXY", - "RTF_REINSTATE", - "RTF_REJECT", - "RTF_RNH_LOCKED", - "RTF_ROUTER", - "RTF_SOURCE", - "RTF_SRC", - "RTF_STATIC", - "RTF_STICKY", - "RTF_THROW", - "RTF_TUNNEL", - "RTF_UP", - "RTF_USETRAILERS", - "RTF_WASCLONED", - "RTF_WINDOW", - "RTF_XRESOLVE", - 
"RTM_ADD", - "RTM_BASE", - "RTM_CHANGE", - "RTM_CHGADDR", - "RTM_DELACTION", - "RTM_DELADDR", - "RTM_DELADDRLABEL", - "RTM_DELETE", - "RTM_DELLINK", - "RTM_DELMADDR", - "RTM_DELNEIGH", - "RTM_DELQDISC", - "RTM_DELROUTE", - "RTM_DELRULE", - "RTM_DELTCLASS", - "RTM_DELTFILTER", - "RTM_DESYNC", - "RTM_F_CLONED", - "RTM_F_EQUALIZE", - "RTM_F_NOTIFY", - "RTM_F_PREFIX", - "RTM_GET", - "RTM_GET2", - "RTM_GETACTION", - "RTM_GETADDR", - "RTM_GETADDRLABEL", - "RTM_GETANYCAST", - "RTM_GETDCB", - "RTM_GETLINK", - "RTM_GETMULTICAST", - "RTM_GETNEIGH", - "RTM_GETNEIGHTBL", - "RTM_GETQDISC", - "RTM_GETROUTE", - "RTM_GETRULE", - "RTM_GETTCLASS", - "RTM_GETTFILTER", - "RTM_IEEE80211", - "RTM_IFANNOUNCE", - "RTM_IFINFO", - "RTM_IFINFO2", - "RTM_LLINFO_UPD", - "RTM_LOCK", - "RTM_LOSING", - "RTM_MAX", - "RTM_MAXSIZE", - "RTM_MISS", - "RTM_NEWACTION", - "RTM_NEWADDR", - "RTM_NEWADDRLABEL", - "RTM_NEWLINK", - "RTM_NEWMADDR", - "RTM_NEWMADDR2", - "RTM_NEWNDUSEROPT", - "RTM_NEWNEIGH", - "RTM_NEWNEIGHTBL", - "RTM_NEWPREFIX", - "RTM_NEWQDISC", - "RTM_NEWROUTE", - "RTM_NEWRULE", - "RTM_NEWTCLASS", - "RTM_NEWTFILTER", - "RTM_NR_FAMILIES", - "RTM_NR_MSGTYPES", - "RTM_OIFINFO", - "RTM_OLDADD", - "RTM_OLDDEL", - "RTM_OOIFINFO", - "RTM_REDIRECT", - "RTM_RESOLVE", - "RTM_RTTUNIT", - "RTM_SETDCB", - "RTM_SETGATE", - "RTM_SETLINK", - "RTM_SETNEIGHTBL", - "RTM_VERSION", - "RTNH_ALIGNTO", - "RTNH_F_DEAD", - "RTNH_F_ONLINK", - "RTNH_F_PERVASIVE", - "RTNLGRP_IPV4_IFADDR", - "RTNLGRP_IPV4_MROUTE", - "RTNLGRP_IPV4_ROUTE", - "RTNLGRP_IPV4_RULE", - "RTNLGRP_IPV6_IFADDR", - "RTNLGRP_IPV6_IFINFO", - "RTNLGRP_IPV6_MROUTE", - "RTNLGRP_IPV6_PREFIX", - "RTNLGRP_IPV6_ROUTE", - "RTNLGRP_IPV6_RULE", - "RTNLGRP_LINK", - "RTNLGRP_ND_USEROPT", - "RTNLGRP_NEIGH", - "RTNLGRP_NONE", - "RTNLGRP_NOTIFY", - "RTNLGRP_TC", - "RTN_ANYCAST", - "RTN_BLACKHOLE", - "RTN_BROADCAST", - "RTN_LOCAL", - "RTN_MAX", - "RTN_MULTICAST", - "RTN_NAT", - "RTN_PROHIBIT", - "RTN_THROW", - "RTN_UNICAST", - "RTN_UNREACHABLE", - "RTN_UNSPEC", - "RTN_XRESOLVE", - "RTPROT_BIRD", - "RTPROT_BOOT", - "RTPROT_DHCP", - "RTPROT_DNROUTED", - "RTPROT_GATED", - "RTPROT_KERNEL", - "RTPROT_MRT", - "RTPROT_NTK", - "RTPROT_RA", - "RTPROT_REDIRECT", - "RTPROT_STATIC", - "RTPROT_UNSPEC", - "RTPROT_XORP", - "RTPROT_ZEBRA", - "RTV_EXPIRE", - "RTV_HOPCOUNT", - "RTV_MTU", - "RTV_RPIPE", - "RTV_RTT", - "RTV_RTTVAR", - "RTV_SPIPE", - "RTV_SSTHRESH", - "RTV_WEIGHT", - "RT_CACHING_CONTEXT", - "RT_CLASS_DEFAULT", - "RT_CLASS_LOCAL", - "RT_CLASS_MAIN", - "RT_CLASS_MAX", - "RT_CLASS_UNSPEC", - "RT_DEFAULT_FIB", - "RT_NORTREF", - "RT_SCOPE_HOST", - "RT_SCOPE_LINK", - "RT_SCOPE_NOWHERE", - "RT_SCOPE_SITE", - "RT_SCOPE_UNIVERSE", - "RT_TABLEID_MAX", - "RT_TABLE_COMPAT", - "RT_TABLE_DEFAULT", - "RT_TABLE_LOCAL", - "RT_TABLE_MAIN", - "RT_TABLE_MAX", - "RT_TABLE_UNSPEC", - "RUSAGE_CHILDREN", - "RUSAGE_SELF", - "RUSAGE_THREAD", - "Radvisory_t", - "RawConn", - "RawSockaddr", - "RawSockaddrAny", - "RawSockaddrDatalink", - "RawSockaddrInet4", - "RawSockaddrInet6", - "RawSockaddrLinklayer", - "RawSockaddrNetlink", - "RawSockaddrUnix", - "RawSyscall", - "RawSyscall6", - "Read", - "ReadConsole", - "ReadDirectoryChanges", - "ReadDirent", - "ReadFile", - "Readlink", - "Reboot", - "Recvfrom", - "Recvmsg", - "RegCloseKey", - "RegEnumKeyEx", - "RegOpenKeyEx", - "RegQueryInfoKey", - "RegQueryValueEx", - "RemoveDirectory", - "Removexattr", - "Rename", - "Renameat", - "Revoke", - "Rlimit", - "Rmdir", - "RouteMessage", - "RouteRIB", - "RoutingMessage", - "RtAttr", - "RtGenmsg", - "RtMetrics", - "RtMsg", - "RtMsghdr", - 
"RtNexthop", - "Rusage", - "SCM_BINTIME", - "SCM_CREDENTIALS", - "SCM_CREDS", - "SCM_RIGHTS", - "SCM_TIMESTAMP", - "SCM_TIMESTAMPING", - "SCM_TIMESTAMPNS", - "SCM_TIMESTAMP_MONOTONIC", - "SHUT_RD", - "SHUT_RDWR", - "SHUT_WR", - "SID", - "SIDAndAttributes", - "SIGABRT", - "SIGALRM", - "SIGBUS", - "SIGCHLD", - "SIGCLD", - "SIGCONT", - "SIGEMT", - "SIGFPE", - "SIGHUP", - "SIGILL", - "SIGINFO", - "SIGINT", - "SIGIO", - "SIGIOT", - "SIGKILL", - "SIGLIBRT", - "SIGLWP", - "SIGPIPE", - "SIGPOLL", - "SIGPROF", - "SIGPWR", - "SIGQUIT", - "SIGSEGV", - "SIGSTKFLT", - "SIGSTOP", - "SIGSYS", - "SIGTERM", - "SIGTHR", - "SIGTRAP", - "SIGTSTP", - "SIGTTIN", - "SIGTTOU", - "SIGUNUSED", - "SIGURG", - "SIGUSR1", - "SIGUSR2", - "SIGVTALRM", - "SIGWINCH", - "SIGXCPU", - "SIGXFSZ", - "SIOCADDDLCI", - "SIOCADDMULTI", - "SIOCADDRT", - "SIOCAIFADDR", - "SIOCAIFGROUP", - "SIOCALIFADDR", - "SIOCARPIPLL", - "SIOCATMARK", - "SIOCAUTOADDR", - "SIOCAUTONETMASK", - "SIOCBRDGADD", - "SIOCBRDGADDS", - "SIOCBRDGARL", - "SIOCBRDGDADDR", - "SIOCBRDGDEL", - "SIOCBRDGDELS", - "SIOCBRDGFLUSH", - "SIOCBRDGFRL", - "SIOCBRDGGCACHE", - "SIOCBRDGGFD", - "SIOCBRDGGHT", - "SIOCBRDGGIFFLGS", - "SIOCBRDGGMA", - "SIOCBRDGGPARAM", - "SIOCBRDGGPRI", - "SIOCBRDGGRL", - "SIOCBRDGGSIFS", - "SIOCBRDGGTO", - "SIOCBRDGIFS", - "SIOCBRDGRTS", - "SIOCBRDGSADDR", - "SIOCBRDGSCACHE", - "SIOCBRDGSFD", - "SIOCBRDGSHT", - "SIOCBRDGSIFCOST", - "SIOCBRDGSIFFLGS", - "SIOCBRDGSIFPRIO", - "SIOCBRDGSMA", - "SIOCBRDGSPRI", - "SIOCBRDGSPROTO", - "SIOCBRDGSTO", - "SIOCBRDGSTXHC", - "SIOCDARP", - "SIOCDELDLCI", - "SIOCDELMULTI", - "SIOCDELRT", - "SIOCDEVPRIVATE", - "SIOCDIFADDR", - "SIOCDIFGROUP", - "SIOCDIFPHYADDR", - "SIOCDLIFADDR", - "SIOCDRARP", - "SIOCGARP", - "SIOCGDRVSPEC", - "SIOCGETKALIVE", - "SIOCGETLABEL", - "SIOCGETPFLOW", - "SIOCGETPFSYNC", - "SIOCGETSGCNT", - "SIOCGETVIFCNT", - "SIOCGETVLAN", - "SIOCGHIWAT", - "SIOCGIFADDR", - "SIOCGIFADDRPREF", - "SIOCGIFALIAS", - "SIOCGIFALTMTU", - "SIOCGIFASYNCMAP", - "SIOCGIFBOND", - "SIOCGIFBR", - "SIOCGIFBRDADDR", - "SIOCGIFCAP", - "SIOCGIFCONF", - "SIOCGIFCOUNT", - "SIOCGIFDATA", - "SIOCGIFDESCR", - "SIOCGIFDEVMTU", - "SIOCGIFDLT", - "SIOCGIFDSTADDR", - "SIOCGIFENCAP", - "SIOCGIFFIB", - "SIOCGIFFLAGS", - "SIOCGIFGATTR", - "SIOCGIFGENERIC", - "SIOCGIFGMEMB", - "SIOCGIFGROUP", - "SIOCGIFHARDMTU", - "SIOCGIFHWADDR", - "SIOCGIFINDEX", - "SIOCGIFKPI", - "SIOCGIFMAC", - "SIOCGIFMAP", - "SIOCGIFMEDIA", - "SIOCGIFMEM", - "SIOCGIFMETRIC", - "SIOCGIFMTU", - "SIOCGIFNAME", - "SIOCGIFNETMASK", - "SIOCGIFPDSTADDR", - "SIOCGIFPFLAGS", - "SIOCGIFPHYS", - "SIOCGIFPRIORITY", - "SIOCGIFPSRCADDR", - "SIOCGIFRDOMAIN", - "SIOCGIFRTLABEL", - "SIOCGIFSLAVE", - "SIOCGIFSTATUS", - "SIOCGIFTIMESLOT", - "SIOCGIFTXQLEN", - "SIOCGIFVLAN", - "SIOCGIFWAKEFLAGS", - "SIOCGIFXFLAGS", - "SIOCGLIFADDR", - "SIOCGLIFPHYADDR", - "SIOCGLIFPHYRTABLE", - "SIOCGLIFPHYTTL", - "SIOCGLINKSTR", - "SIOCGLOWAT", - "SIOCGPGRP", - "SIOCGPRIVATE_0", - "SIOCGPRIVATE_1", - "SIOCGRARP", - "SIOCGSPPPPARAMS", - "SIOCGSTAMP", - "SIOCGSTAMPNS", - "SIOCGVH", - "SIOCGVNETID", - "SIOCIFCREATE", - "SIOCIFCREATE2", - "SIOCIFDESTROY", - "SIOCIFGCLONERS", - "SIOCINITIFADDR", - "SIOCPROTOPRIVATE", - "SIOCRSLVMULTI", - "SIOCRTMSG", - "SIOCSARP", - "SIOCSDRVSPEC", - "SIOCSETKALIVE", - "SIOCSETLABEL", - "SIOCSETPFLOW", - "SIOCSETPFSYNC", - "SIOCSETVLAN", - "SIOCSHIWAT", - "SIOCSIFADDR", - "SIOCSIFADDRPREF", - "SIOCSIFALTMTU", - "SIOCSIFASYNCMAP", - "SIOCSIFBOND", - "SIOCSIFBR", - "SIOCSIFBRDADDR", - "SIOCSIFCAP", - "SIOCSIFDESCR", - "SIOCSIFDSTADDR", - "SIOCSIFENCAP", - 
"SIOCSIFFIB", - "SIOCSIFFLAGS", - "SIOCSIFGATTR", - "SIOCSIFGENERIC", - "SIOCSIFHWADDR", - "SIOCSIFHWBROADCAST", - "SIOCSIFKPI", - "SIOCSIFLINK", - "SIOCSIFLLADDR", - "SIOCSIFMAC", - "SIOCSIFMAP", - "SIOCSIFMEDIA", - "SIOCSIFMEM", - "SIOCSIFMETRIC", - "SIOCSIFMTU", - "SIOCSIFNAME", - "SIOCSIFNETMASK", - "SIOCSIFPFLAGS", - "SIOCSIFPHYADDR", - "SIOCSIFPHYS", - "SIOCSIFPRIORITY", - "SIOCSIFRDOMAIN", - "SIOCSIFRTLABEL", - "SIOCSIFRVNET", - "SIOCSIFSLAVE", - "SIOCSIFTIMESLOT", - "SIOCSIFTXQLEN", - "SIOCSIFVLAN", - "SIOCSIFVNET", - "SIOCSIFXFLAGS", - "SIOCSLIFPHYADDR", - "SIOCSLIFPHYRTABLE", - "SIOCSLIFPHYTTL", - "SIOCSLINKSTR", - "SIOCSLOWAT", - "SIOCSPGRP", - "SIOCSRARP", - "SIOCSSPPPPARAMS", - "SIOCSVH", - "SIOCSVNETID", - "SIOCZIFDATA", - "SIO_GET_EXTENSION_FUNCTION_POINTER", - "SIO_GET_INTERFACE_LIST", - "SIO_KEEPALIVE_VALS", - "SIO_UDP_CONNRESET", - "SOCK_CLOEXEC", - "SOCK_DCCP", - "SOCK_DGRAM", - "SOCK_FLAGS_MASK", - "SOCK_MAXADDRLEN", - "SOCK_NONBLOCK", - "SOCK_NOSIGPIPE", - "SOCK_PACKET", - "SOCK_RAW", - "SOCK_RDM", - "SOCK_SEQPACKET", - "SOCK_STREAM", - "SOL_AAL", - "SOL_ATM", - "SOL_DECNET", - "SOL_ICMPV6", - "SOL_IP", - "SOL_IPV6", - "SOL_IRDA", - "SOL_PACKET", - "SOL_RAW", - "SOL_SOCKET", - "SOL_TCP", - "SOL_X25", - "SOMAXCONN", - "SO_ACCEPTCONN", - "SO_ACCEPTFILTER", - "SO_ATTACH_FILTER", - "SO_BINDANY", - "SO_BINDTODEVICE", - "SO_BINTIME", - "SO_BROADCAST", - "SO_BSDCOMPAT", - "SO_DEBUG", - "SO_DETACH_FILTER", - "SO_DOMAIN", - "SO_DONTROUTE", - "SO_DONTTRUNC", - "SO_ERROR", - "SO_KEEPALIVE", - "SO_LABEL", - "SO_LINGER", - "SO_LINGER_SEC", - "SO_LISTENINCQLEN", - "SO_LISTENQLEN", - "SO_LISTENQLIMIT", - "SO_MARK", - "SO_NETPROC", - "SO_NKE", - "SO_NOADDRERR", - "SO_NOHEADER", - "SO_NOSIGPIPE", - "SO_NOTIFYCONFLICT", - "SO_NO_CHECK", - "SO_NO_DDP", - "SO_NO_OFFLOAD", - "SO_NP_EXTENSIONS", - "SO_NREAD", - "SO_NUMRCVPKT", - "SO_NWRITE", - "SO_OOBINLINE", - "SO_OVERFLOWED", - "SO_PASSCRED", - "SO_PASSSEC", - "SO_PEERCRED", - "SO_PEERLABEL", - "SO_PEERNAME", - "SO_PEERSEC", - "SO_PRIORITY", - "SO_PROTOCOL", - "SO_PROTOTYPE", - "SO_RANDOMPORT", - "SO_RCVBUF", - "SO_RCVBUFFORCE", - "SO_RCVLOWAT", - "SO_RCVTIMEO", - "SO_RESTRICTIONS", - "SO_RESTRICT_DENYIN", - "SO_RESTRICT_DENYOUT", - "SO_RESTRICT_DENYSET", - "SO_REUSEADDR", - "SO_REUSEPORT", - "SO_REUSESHAREUID", - "SO_RTABLE", - "SO_RXQ_OVFL", - "SO_SECURITY_AUTHENTICATION", - "SO_SECURITY_ENCRYPTION_NETWORK", - "SO_SECURITY_ENCRYPTION_TRANSPORT", - "SO_SETFIB", - "SO_SNDBUF", - "SO_SNDBUFFORCE", - "SO_SNDLOWAT", - "SO_SNDTIMEO", - "SO_SPLICE", - "SO_TIMESTAMP", - "SO_TIMESTAMPING", - "SO_TIMESTAMPNS", - "SO_TIMESTAMP_MONOTONIC", - "SO_TYPE", - "SO_UPCALLCLOSEWAIT", - "SO_UPDATE_ACCEPT_CONTEXT", - "SO_UPDATE_CONNECT_CONTEXT", - "SO_USELOOPBACK", - "SO_USER_COOKIE", - "SO_VENDOR", - "SO_WANTMORE", - "SO_WANTOOBFLAG", - "SSLExtraCertChainPolicyPara", - "STANDARD_RIGHTS_ALL", - "STANDARD_RIGHTS_EXECUTE", - "STANDARD_RIGHTS_READ", - "STANDARD_RIGHTS_REQUIRED", - "STANDARD_RIGHTS_WRITE", - "STARTF_USESHOWWINDOW", - "STARTF_USESTDHANDLES", - "STD_ERROR_HANDLE", - "STD_INPUT_HANDLE", - "STD_OUTPUT_HANDLE", - "SUBLANG_ENGLISH_US", - "SW_FORCEMINIMIZE", - "SW_HIDE", - "SW_MAXIMIZE", - "SW_MINIMIZE", - "SW_NORMAL", - "SW_RESTORE", - "SW_SHOW", - "SW_SHOWDEFAULT", - "SW_SHOWMAXIMIZED", - "SW_SHOWMINIMIZED", - "SW_SHOWMINNOACTIVE", - "SW_SHOWNA", - "SW_SHOWNOACTIVATE", - "SW_SHOWNORMAL", - "SYMBOLIC_LINK_FLAG_DIRECTORY", - "SYNCHRONIZE", - "SYSCTL_VERSION", - "SYSCTL_VERS_0", - "SYSCTL_VERS_1", - "SYSCTL_VERS_MASK", - "SYS_ABORT2", - "SYS_ACCEPT", - 
"SYS_ACCEPT4", - "SYS_ACCEPT_NOCANCEL", - "SYS_ACCESS", - "SYS_ACCESS_EXTENDED", - "SYS_ACCT", - "SYS_ADD_KEY", - "SYS_ADD_PROFIL", - "SYS_ADJFREQ", - "SYS_ADJTIME", - "SYS_ADJTIMEX", - "SYS_AFS_SYSCALL", - "SYS_AIO_CANCEL", - "SYS_AIO_ERROR", - "SYS_AIO_FSYNC", - "SYS_AIO_MLOCK", - "SYS_AIO_READ", - "SYS_AIO_RETURN", - "SYS_AIO_SUSPEND", - "SYS_AIO_SUSPEND_NOCANCEL", - "SYS_AIO_WAITCOMPLETE", - "SYS_AIO_WRITE", - "SYS_ALARM", - "SYS_ARCH_PRCTL", - "SYS_ARM_FADVISE64_64", - "SYS_ARM_SYNC_FILE_RANGE", - "SYS_ATGETMSG", - "SYS_ATPGETREQ", - "SYS_ATPGETRSP", - "SYS_ATPSNDREQ", - "SYS_ATPSNDRSP", - "SYS_ATPUTMSG", - "SYS_ATSOCKET", - "SYS_AUDIT", - "SYS_AUDITCTL", - "SYS_AUDITON", - "SYS_AUDIT_SESSION_JOIN", - "SYS_AUDIT_SESSION_PORT", - "SYS_AUDIT_SESSION_SELF", - "SYS_BDFLUSH", - "SYS_BIND", - "SYS_BINDAT", - "SYS_BREAK", - "SYS_BRK", - "SYS_BSDTHREAD_CREATE", - "SYS_BSDTHREAD_REGISTER", - "SYS_BSDTHREAD_TERMINATE", - "SYS_CAPGET", - "SYS_CAPSET", - "SYS_CAP_ENTER", - "SYS_CAP_FCNTLS_GET", - "SYS_CAP_FCNTLS_LIMIT", - "SYS_CAP_GETMODE", - "SYS_CAP_GETRIGHTS", - "SYS_CAP_IOCTLS_GET", - "SYS_CAP_IOCTLS_LIMIT", - "SYS_CAP_NEW", - "SYS_CAP_RIGHTS_GET", - "SYS_CAP_RIGHTS_LIMIT", - "SYS_CHDIR", - "SYS_CHFLAGS", - "SYS_CHFLAGSAT", - "SYS_CHMOD", - "SYS_CHMOD_EXTENDED", - "SYS_CHOWN", - "SYS_CHOWN32", - "SYS_CHROOT", - "SYS_CHUD", - "SYS_CLOCK_ADJTIME", - "SYS_CLOCK_GETCPUCLOCKID2", - "SYS_CLOCK_GETRES", - "SYS_CLOCK_GETTIME", - "SYS_CLOCK_NANOSLEEP", - "SYS_CLOCK_SETTIME", - "SYS_CLONE", - "SYS_CLOSE", - "SYS_CLOSEFROM", - "SYS_CLOSE_NOCANCEL", - "SYS_CONNECT", - "SYS_CONNECTAT", - "SYS_CONNECT_NOCANCEL", - "SYS_COPYFILE", - "SYS_CPUSET", - "SYS_CPUSET_GETAFFINITY", - "SYS_CPUSET_GETID", - "SYS_CPUSET_SETAFFINITY", - "SYS_CPUSET_SETID", - "SYS_CREAT", - "SYS_CREATE_MODULE", - "SYS_CSOPS", - "SYS_CSOPS_AUDITTOKEN", - "SYS_DELETE", - "SYS_DELETE_MODULE", - "SYS_DUP", - "SYS_DUP2", - "SYS_DUP3", - "SYS_EACCESS", - "SYS_EPOLL_CREATE", - "SYS_EPOLL_CREATE1", - "SYS_EPOLL_CTL", - "SYS_EPOLL_CTL_OLD", - "SYS_EPOLL_PWAIT", - "SYS_EPOLL_WAIT", - "SYS_EPOLL_WAIT_OLD", - "SYS_EVENTFD", - "SYS_EVENTFD2", - "SYS_EXCHANGEDATA", - "SYS_EXECVE", - "SYS_EXIT", - "SYS_EXIT_GROUP", - "SYS_EXTATTRCTL", - "SYS_EXTATTR_DELETE_FD", - "SYS_EXTATTR_DELETE_FILE", - "SYS_EXTATTR_DELETE_LINK", - "SYS_EXTATTR_GET_FD", - "SYS_EXTATTR_GET_FILE", - "SYS_EXTATTR_GET_LINK", - "SYS_EXTATTR_LIST_FD", - "SYS_EXTATTR_LIST_FILE", - "SYS_EXTATTR_LIST_LINK", - "SYS_EXTATTR_SET_FD", - "SYS_EXTATTR_SET_FILE", - "SYS_EXTATTR_SET_LINK", - "SYS_FACCESSAT", - "SYS_FADVISE64", - "SYS_FADVISE64_64", - "SYS_FALLOCATE", - "SYS_FANOTIFY_INIT", - "SYS_FANOTIFY_MARK", - "SYS_FCHDIR", - "SYS_FCHFLAGS", - "SYS_FCHMOD", - "SYS_FCHMODAT", - "SYS_FCHMOD_EXTENDED", - "SYS_FCHOWN", - "SYS_FCHOWN32", - "SYS_FCHOWNAT", - "SYS_FCHROOT", - "SYS_FCNTL", - "SYS_FCNTL64", - "SYS_FCNTL_NOCANCEL", - "SYS_FDATASYNC", - "SYS_FEXECVE", - "SYS_FFCLOCK_GETCOUNTER", - "SYS_FFCLOCK_GETESTIMATE", - "SYS_FFCLOCK_SETESTIMATE", - "SYS_FFSCTL", - "SYS_FGETATTRLIST", - "SYS_FGETXATTR", - "SYS_FHOPEN", - "SYS_FHSTAT", - "SYS_FHSTATFS", - "SYS_FILEPORT_MAKEFD", - "SYS_FILEPORT_MAKEPORT", - "SYS_FKTRACE", - "SYS_FLISTXATTR", - "SYS_FLOCK", - "SYS_FORK", - "SYS_FPATHCONF", - "SYS_FREEBSD6_FTRUNCATE", - "SYS_FREEBSD6_LSEEK", - "SYS_FREEBSD6_MMAP", - "SYS_FREEBSD6_PREAD", - "SYS_FREEBSD6_PWRITE", - "SYS_FREEBSD6_TRUNCATE", - "SYS_FREMOVEXATTR", - "SYS_FSCTL", - "SYS_FSETATTRLIST", - "SYS_FSETXATTR", - "SYS_FSGETPATH", - "SYS_FSTAT", - "SYS_FSTAT64", - "SYS_FSTAT64_EXTENDED", - 
"SYS_FSTATAT", - "SYS_FSTATAT64", - "SYS_FSTATFS", - "SYS_FSTATFS64", - "SYS_FSTATV", - "SYS_FSTATVFS1", - "SYS_FSTAT_EXTENDED", - "SYS_FSYNC", - "SYS_FSYNC_NOCANCEL", - "SYS_FSYNC_RANGE", - "SYS_FTIME", - "SYS_FTRUNCATE", - "SYS_FTRUNCATE64", - "SYS_FUTEX", - "SYS_FUTIMENS", - "SYS_FUTIMES", - "SYS_FUTIMESAT", - "SYS_GETATTRLIST", - "SYS_GETAUDIT", - "SYS_GETAUDIT_ADDR", - "SYS_GETAUID", - "SYS_GETCONTEXT", - "SYS_GETCPU", - "SYS_GETCWD", - "SYS_GETDENTS", - "SYS_GETDENTS64", - "SYS_GETDIRENTRIES", - "SYS_GETDIRENTRIES64", - "SYS_GETDIRENTRIESATTR", - "SYS_GETDTABLECOUNT", - "SYS_GETDTABLESIZE", - "SYS_GETEGID", - "SYS_GETEGID32", - "SYS_GETEUID", - "SYS_GETEUID32", - "SYS_GETFH", - "SYS_GETFSSTAT", - "SYS_GETFSSTAT64", - "SYS_GETGID", - "SYS_GETGID32", - "SYS_GETGROUPS", - "SYS_GETGROUPS32", - "SYS_GETHOSTUUID", - "SYS_GETITIMER", - "SYS_GETLCID", - "SYS_GETLOGIN", - "SYS_GETLOGINCLASS", - "SYS_GETPEERNAME", - "SYS_GETPGID", - "SYS_GETPGRP", - "SYS_GETPID", - "SYS_GETPMSG", - "SYS_GETPPID", - "SYS_GETPRIORITY", - "SYS_GETRESGID", - "SYS_GETRESGID32", - "SYS_GETRESUID", - "SYS_GETRESUID32", - "SYS_GETRLIMIT", - "SYS_GETRTABLE", - "SYS_GETRUSAGE", - "SYS_GETSGROUPS", - "SYS_GETSID", - "SYS_GETSOCKNAME", - "SYS_GETSOCKOPT", - "SYS_GETTHRID", - "SYS_GETTID", - "SYS_GETTIMEOFDAY", - "SYS_GETUID", - "SYS_GETUID32", - "SYS_GETVFSSTAT", - "SYS_GETWGROUPS", - "SYS_GETXATTR", - "SYS_GET_KERNEL_SYMS", - "SYS_GET_MEMPOLICY", - "SYS_GET_ROBUST_LIST", - "SYS_GET_THREAD_AREA", - "SYS_GSSD_SYSCALL", - "SYS_GTTY", - "SYS_IDENTITYSVC", - "SYS_IDLE", - "SYS_INITGROUPS", - "SYS_INIT_MODULE", - "SYS_INOTIFY_ADD_WATCH", - "SYS_INOTIFY_INIT", - "SYS_INOTIFY_INIT1", - "SYS_INOTIFY_RM_WATCH", - "SYS_IOCTL", - "SYS_IOPERM", - "SYS_IOPL", - "SYS_IOPOLICYSYS", - "SYS_IOPRIO_GET", - "SYS_IOPRIO_SET", - "SYS_IO_CANCEL", - "SYS_IO_DESTROY", - "SYS_IO_GETEVENTS", - "SYS_IO_SETUP", - "SYS_IO_SUBMIT", - "SYS_IPC", - "SYS_ISSETUGID", - "SYS_JAIL", - "SYS_JAIL_ATTACH", - "SYS_JAIL_GET", - "SYS_JAIL_REMOVE", - "SYS_JAIL_SET", - "SYS_KAS_INFO", - "SYS_KDEBUG_TRACE", - "SYS_KENV", - "SYS_KEVENT", - "SYS_KEVENT64", - "SYS_KEXEC_LOAD", - "SYS_KEYCTL", - "SYS_KILL", - "SYS_KLDFIND", - "SYS_KLDFIRSTMOD", - "SYS_KLDLOAD", - "SYS_KLDNEXT", - "SYS_KLDSTAT", - "SYS_KLDSYM", - "SYS_KLDUNLOAD", - "SYS_KLDUNLOADF", - "SYS_KMQ_NOTIFY", - "SYS_KMQ_OPEN", - "SYS_KMQ_SETATTR", - "SYS_KMQ_TIMEDRECEIVE", - "SYS_KMQ_TIMEDSEND", - "SYS_KMQ_UNLINK", - "SYS_KQUEUE", - "SYS_KQUEUE1", - "SYS_KSEM_CLOSE", - "SYS_KSEM_DESTROY", - "SYS_KSEM_GETVALUE", - "SYS_KSEM_INIT", - "SYS_KSEM_OPEN", - "SYS_KSEM_POST", - "SYS_KSEM_TIMEDWAIT", - "SYS_KSEM_TRYWAIT", - "SYS_KSEM_UNLINK", - "SYS_KSEM_WAIT", - "SYS_KTIMER_CREATE", - "SYS_KTIMER_DELETE", - "SYS_KTIMER_GETOVERRUN", - "SYS_KTIMER_GETTIME", - "SYS_KTIMER_SETTIME", - "SYS_KTRACE", - "SYS_LCHFLAGS", - "SYS_LCHMOD", - "SYS_LCHOWN", - "SYS_LCHOWN32", - "SYS_LEDGER", - "SYS_LGETFH", - "SYS_LGETXATTR", - "SYS_LINK", - "SYS_LINKAT", - "SYS_LIO_LISTIO", - "SYS_LISTEN", - "SYS_LISTXATTR", - "SYS_LLISTXATTR", - "SYS_LOCK", - "SYS_LOOKUP_DCOOKIE", - "SYS_LPATHCONF", - "SYS_LREMOVEXATTR", - "SYS_LSEEK", - "SYS_LSETXATTR", - "SYS_LSTAT", - "SYS_LSTAT64", - "SYS_LSTAT64_EXTENDED", - "SYS_LSTATV", - "SYS_LSTAT_EXTENDED", - "SYS_LUTIMES", - "SYS_MAC_SYSCALL", - "SYS_MADVISE", - "SYS_MADVISE1", - "SYS_MAXSYSCALL", - "SYS_MBIND", - "SYS_MIGRATE_PAGES", - "SYS_MINCORE", - "SYS_MINHERIT", - "SYS_MKCOMPLEX", - "SYS_MKDIR", - "SYS_MKDIRAT", - "SYS_MKDIR_EXTENDED", - "SYS_MKFIFO", - "SYS_MKFIFOAT", - "SYS_MKFIFO_EXTENDED", - 
"SYS_MKNOD", - "SYS_MKNODAT", - "SYS_MLOCK", - "SYS_MLOCKALL", - "SYS_MMAP", - "SYS_MMAP2", - "SYS_MODCTL", - "SYS_MODFIND", - "SYS_MODFNEXT", - "SYS_MODIFY_LDT", - "SYS_MODNEXT", - "SYS_MODSTAT", - "SYS_MODWATCH", - "SYS_MOUNT", - "SYS_MOVE_PAGES", - "SYS_MPROTECT", - "SYS_MPX", - "SYS_MQUERY", - "SYS_MQ_GETSETATTR", - "SYS_MQ_NOTIFY", - "SYS_MQ_OPEN", - "SYS_MQ_TIMEDRECEIVE", - "SYS_MQ_TIMEDSEND", - "SYS_MQ_UNLINK", - "SYS_MREMAP", - "SYS_MSGCTL", - "SYS_MSGGET", - "SYS_MSGRCV", - "SYS_MSGRCV_NOCANCEL", - "SYS_MSGSND", - "SYS_MSGSND_NOCANCEL", - "SYS_MSGSYS", - "SYS_MSYNC", - "SYS_MSYNC_NOCANCEL", - "SYS_MUNLOCK", - "SYS_MUNLOCKALL", - "SYS_MUNMAP", - "SYS_NAME_TO_HANDLE_AT", - "SYS_NANOSLEEP", - "SYS_NEWFSTATAT", - "SYS_NFSCLNT", - "SYS_NFSSERVCTL", - "SYS_NFSSVC", - "SYS_NFSTAT", - "SYS_NICE", - "SYS_NLM_SYSCALL", - "SYS_NLSTAT", - "SYS_NMOUNT", - "SYS_NSTAT", - "SYS_NTP_ADJTIME", - "SYS_NTP_GETTIME", - "SYS_NUMA_GETAFFINITY", - "SYS_NUMA_SETAFFINITY", - "SYS_OABI_SYSCALL_BASE", - "SYS_OBREAK", - "SYS_OLDFSTAT", - "SYS_OLDLSTAT", - "SYS_OLDOLDUNAME", - "SYS_OLDSTAT", - "SYS_OLDUNAME", - "SYS_OPEN", - "SYS_OPENAT", - "SYS_OPENBSD_POLL", - "SYS_OPEN_BY_HANDLE_AT", - "SYS_OPEN_DPROTECTED_NP", - "SYS_OPEN_EXTENDED", - "SYS_OPEN_NOCANCEL", - "SYS_OVADVISE", - "SYS_PACCEPT", - "SYS_PATHCONF", - "SYS_PAUSE", - "SYS_PCICONFIG_IOBASE", - "SYS_PCICONFIG_READ", - "SYS_PCICONFIG_WRITE", - "SYS_PDFORK", - "SYS_PDGETPID", - "SYS_PDKILL", - "SYS_PERF_EVENT_OPEN", - "SYS_PERSONALITY", - "SYS_PID_HIBERNATE", - "SYS_PID_RESUME", - "SYS_PID_SHUTDOWN_SOCKETS", - "SYS_PID_SUSPEND", - "SYS_PIPE", - "SYS_PIPE2", - "SYS_PIVOT_ROOT", - "SYS_PMC_CONTROL", - "SYS_PMC_GET_INFO", - "SYS_POLL", - "SYS_POLLTS", - "SYS_POLL_NOCANCEL", - "SYS_POSIX_FADVISE", - "SYS_POSIX_FALLOCATE", - "SYS_POSIX_OPENPT", - "SYS_POSIX_SPAWN", - "SYS_PPOLL", - "SYS_PRCTL", - "SYS_PREAD", - "SYS_PREAD64", - "SYS_PREADV", - "SYS_PREAD_NOCANCEL", - "SYS_PRLIMIT64", - "SYS_PROCCTL", - "SYS_PROCESS_POLICY", - "SYS_PROCESS_VM_READV", - "SYS_PROCESS_VM_WRITEV", - "SYS_PROC_INFO", - "SYS_PROF", - "SYS_PROFIL", - "SYS_PSELECT", - "SYS_PSELECT6", - "SYS_PSET_ASSIGN", - "SYS_PSET_CREATE", - "SYS_PSET_DESTROY", - "SYS_PSYNCH_CVBROAD", - "SYS_PSYNCH_CVCLRPREPOST", - "SYS_PSYNCH_CVSIGNAL", - "SYS_PSYNCH_CVWAIT", - "SYS_PSYNCH_MUTEXDROP", - "SYS_PSYNCH_MUTEXWAIT", - "SYS_PSYNCH_RW_DOWNGRADE", - "SYS_PSYNCH_RW_LONGRDLOCK", - "SYS_PSYNCH_RW_RDLOCK", - "SYS_PSYNCH_RW_UNLOCK", - "SYS_PSYNCH_RW_UNLOCK2", - "SYS_PSYNCH_RW_UPGRADE", - "SYS_PSYNCH_RW_WRLOCK", - "SYS_PSYNCH_RW_YIELDWRLOCK", - "SYS_PTRACE", - "SYS_PUTPMSG", - "SYS_PWRITE", - "SYS_PWRITE64", - "SYS_PWRITEV", - "SYS_PWRITE_NOCANCEL", - "SYS_QUERY_MODULE", - "SYS_QUOTACTL", - "SYS_RASCTL", - "SYS_RCTL_ADD_RULE", - "SYS_RCTL_GET_LIMITS", - "SYS_RCTL_GET_RACCT", - "SYS_RCTL_GET_RULES", - "SYS_RCTL_REMOVE_RULE", - "SYS_READ", - "SYS_READAHEAD", - "SYS_READDIR", - "SYS_READLINK", - "SYS_READLINKAT", - "SYS_READV", - "SYS_READV_NOCANCEL", - "SYS_READ_NOCANCEL", - "SYS_REBOOT", - "SYS_RECV", - "SYS_RECVFROM", - "SYS_RECVFROM_NOCANCEL", - "SYS_RECVMMSG", - "SYS_RECVMSG", - "SYS_RECVMSG_NOCANCEL", - "SYS_REMAP_FILE_PAGES", - "SYS_REMOVEXATTR", - "SYS_RENAME", - "SYS_RENAMEAT", - "SYS_REQUEST_KEY", - "SYS_RESTART_SYSCALL", - "SYS_REVOKE", - "SYS_RFORK", - "SYS_RMDIR", - "SYS_RTPRIO", - "SYS_RTPRIO_THREAD", - "SYS_RT_SIGACTION", - "SYS_RT_SIGPENDING", - "SYS_RT_SIGPROCMASK", - "SYS_RT_SIGQUEUEINFO", - "SYS_RT_SIGRETURN", - "SYS_RT_SIGSUSPEND", - "SYS_RT_SIGTIMEDWAIT", - "SYS_RT_TGSIGQUEUEINFO", - 
"SYS_SBRK", - "SYS_SCHED_GETAFFINITY", - "SYS_SCHED_GETPARAM", - "SYS_SCHED_GETSCHEDULER", - "SYS_SCHED_GET_PRIORITY_MAX", - "SYS_SCHED_GET_PRIORITY_MIN", - "SYS_SCHED_RR_GET_INTERVAL", - "SYS_SCHED_SETAFFINITY", - "SYS_SCHED_SETPARAM", - "SYS_SCHED_SETSCHEDULER", - "SYS_SCHED_YIELD", - "SYS_SCTP_GENERIC_RECVMSG", - "SYS_SCTP_GENERIC_SENDMSG", - "SYS_SCTP_GENERIC_SENDMSG_IOV", - "SYS_SCTP_PEELOFF", - "SYS_SEARCHFS", - "SYS_SECURITY", - "SYS_SELECT", - "SYS_SELECT_NOCANCEL", - "SYS_SEMCONFIG", - "SYS_SEMCTL", - "SYS_SEMGET", - "SYS_SEMOP", - "SYS_SEMSYS", - "SYS_SEMTIMEDOP", - "SYS_SEM_CLOSE", - "SYS_SEM_DESTROY", - "SYS_SEM_GETVALUE", - "SYS_SEM_INIT", - "SYS_SEM_OPEN", - "SYS_SEM_POST", - "SYS_SEM_TRYWAIT", - "SYS_SEM_UNLINK", - "SYS_SEM_WAIT", - "SYS_SEM_WAIT_NOCANCEL", - "SYS_SEND", - "SYS_SENDFILE", - "SYS_SENDFILE64", - "SYS_SENDMMSG", - "SYS_SENDMSG", - "SYS_SENDMSG_NOCANCEL", - "SYS_SENDTO", - "SYS_SENDTO_NOCANCEL", - "SYS_SETATTRLIST", - "SYS_SETAUDIT", - "SYS_SETAUDIT_ADDR", - "SYS_SETAUID", - "SYS_SETCONTEXT", - "SYS_SETDOMAINNAME", - "SYS_SETEGID", - "SYS_SETEUID", - "SYS_SETFIB", - "SYS_SETFSGID", - "SYS_SETFSGID32", - "SYS_SETFSUID", - "SYS_SETFSUID32", - "SYS_SETGID", - "SYS_SETGID32", - "SYS_SETGROUPS", - "SYS_SETGROUPS32", - "SYS_SETHOSTNAME", - "SYS_SETITIMER", - "SYS_SETLCID", - "SYS_SETLOGIN", - "SYS_SETLOGINCLASS", - "SYS_SETNS", - "SYS_SETPGID", - "SYS_SETPRIORITY", - "SYS_SETPRIVEXEC", - "SYS_SETREGID", - "SYS_SETREGID32", - "SYS_SETRESGID", - "SYS_SETRESGID32", - "SYS_SETRESUID", - "SYS_SETRESUID32", - "SYS_SETREUID", - "SYS_SETREUID32", - "SYS_SETRLIMIT", - "SYS_SETRTABLE", - "SYS_SETSGROUPS", - "SYS_SETSID", - "SYS_SETSOCKOPT", - "SYS_SETTID", - "SYS_SETTID_WITH_PID", - "SYS_SETTIMEOFDAY", - "SYS_SETUID", - "SYS_SETUID32", - "SYS_SETWGROUPS", - "SYS_SETXATTR", - "SYS_SET_MEMPOLICY", - "SYS_SET_ROBUST_LIST", - "SYS_SET_THREAD_AREA", - "SYS_SET_TID_ADDRESS", - "SYS_SGETMASK", - "SYS_SHARED_REGION_CHECK_NP", - "SYS_SHARED_REGION_MAP_AND_SLIDE_NP", - "SYS_SHMAT", - "SYS_SHMCTL", - "SYS_SHMDT", - "SYS_SHMGET", - "SYS_SHMSYS", - "SYS_SHM_OPEN", - "SYS_SHM_UNLINK", - "SYS_SHUTDOWN", - "SYS_SIGACTION", - "SYS_SIGALTSTACK", - "SYS_SIGNAL", - "SYS_SIGNALFD", - "SYS_SIGNALFD4", - "SYS_SIGPENDING", - "SYS_SIGPROCMASK", - "SYS_SIGQUEUE", - "SYS_SIGQUEUEINFO", - "SYS_SIGRETURN", - "SYS_SIGSUSPEND", - "SYS_SIGSUSPEND_NOCANCEL", - "SYS_SIGTIMEDWAIT", - "SYS_SIGWAIT", - "SYS_SIGWAITINFO", - "SYS_SOCKET", - "SYS_SOCKETCALL", - "SYS_SOCKETPAIR", - "SYS_SPLICE", - "SYS_SSETMASK", - "SYS_SSTK", - "SYS_STACK_SNAPSHOT", - "SYS_STAT", - "SYS_STAT64", - "SYS_STAT64_EXTENDED", - "SYS_STATFS", - "SYS_STATFS64", - "SYS_STATV", - "SYS_STATVFS1", - "SYS_STAT_EXTENDED", - "SYS_STIME", - "SYS_STTY", - "SYS_SWAPCONTEXT", - "SYS_SWAPCTL", - "SYS_SWAPOFF", - "SYS_SWAPON", - "SYS_SYMLINK", - "SYS_SYMLINKAT", - "SYS_SYNC", - "SYS_SYNCFS", - "SYS_SYNC_FILE_RANGE", - "SYS_SYSARCH", - "SYS_SYSCALL", - "SYS_SYSCALL_BASE", - "SYS_SYSFS", - "SYS_SYSINFO", - "SYS_SYSLOG", - "SYS_TEE", - "SYS_TGKILL", - "SYS_THREAD_SELFID", - "SYS_THR_CREATE", - "SYS_THR_EXIT", - "SYS_THR_KILL", - "SYS_THR_KILL2", - "SYS_THR_NEW", - "SYS_THR_SELF", - "SYS_THR_SET_NAME", - "SYS_THR_SUSPEND", - "SYS_THR_WAKE", - "SYS_TIME", - "SYS_TIMERFD_CREATE", - "SYS_TIMERFD_GETTIME", - "SYS_TIMERFD_SETTIME", - "SYS_TIMER_CREATE", - "SYS_TIMER_DELETE", - "SYS_TIMER_GETOVERRUN", - "SYS_TIMER_GETTIME", - "SYS_TIMER_SETTIME", - "SYS_TIMES", - "SYS_TKILL", - "SYS_TRUNCATE", - "SYS_TRUNCATE64", - "SYS_TUXCALL", - "SYS_UGETRLIMIT", - 
"SYS_ULIMIT", - "SYS_UMASK", - "SYS_UMASK_EXTENDED", - "SYS_UMOUNT", - "SYS_UMOUNT2", - "SYS_UNAME", - "SYS_UNDELETE", - "SYS_UNLINK", - "SYS_UNLINKAT", - "SYS_UNMOUNT", - "SYS_UNSHARE", - "SYS_USELIB", - "SYS_USTAT", - "SYS_UTIME", - "SYS_UTIMENSAT", - "SYS_UTIMES", - "SYS_UTRACE", - "SYS_UUIDGEN", - "SYS_VADVISE", - "SYS_VFORK", - "SYS_VHANGUP", - "SYS_VM86", - "SYS_VM86OLD", - "SYS_VMSPLICE", - "SYS_VM_PRESSURE_MONITOR", - "SYS_VSERVER", - "SYS_WAIT4", - "SYS_WAIT4_NOCANCEL", - "SYS_WAIT6", - "SYS_WAITEVENT", - "SYS_WAITID", - "SYS_WAITID_NOCANCEL", - "SYS_WAITPID", - "SYS_WATCHEVENT", - "SYS_WORKQ_KERNRETURN", - "SYS_WORKQ_OPEN", - "SYS_WRITE", - "SYS_WRITEV", - "SYS_WRITEV_NOCANCEL", - "SYS_WRITE_NOCANCEL", - "SYS_YIELD", - "SYS__LLSEEK", - "SYS__LWP_CONTINUE", - "SYS__LWP_CREATE", - "SYS__LWP_CTL", - "SYS__LWP_DETACH", - "SYS__LWP_EXIT", - "SYS__LWP_GETNAME", - "SYS__LWP_GETPRIVATE", - "SYS__LWP_KILL", - "SYS__LWP_PARK", - "SYS__LWP_SELF", - "SYS__LWP_SETNAME", - "SYS__LWP_SETPRIVATE", - "SYS__LWP_SUSPEND", - "SYS__LWP_UNPARK", - "SYS__LWP_UNPARK_ALL", - "SYS__LWP_WAIT", - "SYS__LWP_WAKEUP", - "SYS__NEWSELECT", - "SYS__PSET_BIND", - "SYS__SCHED_GETAFFINITY", - "SYS__SCHED_GETPARAM", - "SYS__SCHED_SETAFFINITY", - "SYS__SCHED_SETPARAM", - "SYS__SYSCTL", - "SYS__UMTX_LOCK", - "SYS__UMTX_OP", - "SYS__UMTX_UNLOCK", - "SYS___ACL_ACLCHECK_FD", - "SYS___ACL_ACLCHECK_FILE", - "SYS___ACL_ACLCHECK_LINK", - "SYS___ACL_DELETE_FD", - "SYS___ACL_DELETE_FILE", - "SYS___ACL_DELETE_LINK", - "SYS___ACL_GET_FD", - "SYS___ACL_GET_FILE", - "SYS___ACL_GET_LINK", - "SYS___ACL_SET_FD", - "SYS___ACL_SET_FILE", - "SYS___ACL_SET_LINK", - "SYS___CAP_RIGHTS_GET", - "SYS___CLONE", - "SYS___DISABLE_THREADSIGNAL", - "SYS___GETCWD", - "SYS___GETLOGIN", - "SYS___GET_TCB", - "SYS___MAC_EXECVE", - "SYS___MAC_GETFSSTAT", - "SYS___MAC_GET_FD", - "SYS___MAC_GET_FILE", - "SYS___MAC_GET_LCID", - "SYS___MAC_GET_LCTX", - "SYS___MAC_GET_LINK", - "SYS___MAC_GET_MOUNT", - "SYS___MAC_GET_PID", - "SYS___MAC_GET_PROC", - "SYS___MAC_MOUNT", - "SYS___MAC_SET_FD", - "SYS___MAC_SET_FILE", - "SYS___MAC_SET_LCTX", - "SYS___MAC_SET_LINK", - "SYS___MAC_SET_PROC", - "SYS___MAC_SYSCALL", - "SYS___OLD_SEMWAIT_SIGNAL", - "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", - "SYS___POSIX_CHOWN", - "SYS___POSIX_FCHOWN", - "SYS___POSIX_LCHOWN", - "SYS___POSIX_RENAME", - "SYS___PTHREAD_CANCELED", - "SYS___PTHREAD_CHDIR", - "SYS___PTHREAD_FCHDIR", - "SYS___PTHREAD_KILL", - "SYS___PTHREAD_MARKCANCEL", - "SYS___PTHREAD_SIGMASK", - "SYS___QUOTACTL", - "SYS___SEMCTL", - "SYS___SEMWAIT_SIGNAL", - "SYS___SEMWAIT_SIGNAL_NOCANCEL", - "SYS___SETLOGIN", - "SYS___SETUGID", - "SYS___SET_TCB", - "SYS___SIGACTION_SIGTRAMP", - "SYS___SIGTIMEDWAIT", - "SYS___SIGWAIT", - "SYS___SIGWAIT_NOCANCEL", - "SYS___SYSCTL", - "SYS___TFORK", - "SYS___THREXIT", - "SYS___THRSIGDIVERT", - "SYS___THRSLEEP", - "SYS___THRWAKEUP", - "S_ARCH1", - "S_ARCH2", - "S_BLKSIZE", - "S_IEXEC", - "S_IFBLK", - "S_IFCHR", - "S_IFDIR", - "S_IFIFO", - "S_IFLNK", - "S_IFMT", - "S_IFREG", - "S_IFSOCK", - "S_IFWHT", - "S_IREAD", - "S_IRGRP", - "S_IROTH", - "S_IRUSR", - "S_IRWXG", - "S_IRWXO", - "S_IRWXU", - "S_ISGID", - "S_ISTXT", - "S_ISUID", - "S_ISVTX", - "S_IWGRP", - "S_IWOTH", - "S_IWRITE", - "S_IWUSR", - "S_IXGRP", - "S_IXOTH", - "S_IXUSR", - "S_LOGIN_SET", - "SecurityAttributes", - "Seek", - "Select", - "Sendfile", - "Sendmsg", - "SendmsgN", - "Sendto", - "Servent", - "SetBpf", - "SetBpfBuflen", - "SetBpfDatalink", - "SetBpfHeadercmpl", - "SetBpfImmediate", - "SetBpfInterface", - "SetBpfPromisc", - 
"SetBpfTimeout", - "SetCurrentDirectory", - "SetEndOfFile", - "SetEnvironmentVariable", - "SetFileAttributes", - "SetFileCompletionNotificationModes", - "SetFilePointer", - "SetFileTime", - "SetHandleInformation", - "SetKevent", - "SetLsfPromisc", - "SetNonblock", - "Setdomainname", - "Setegid", - "Setenv", - "Seteuid", - "Setfsgid", - "Setfsuid", - "Setgid", - "Setgroups", - "Sethostname", - "Setlogin", - "Setpgid", - "Setpriority", - "Setprivexec", - "Setregid", - "Setresgid", - "Setresuid", - "Setreuid", - "Setrlimit", - "Setsid", - "Setsockopt", - "SetsockoptByte", - "SetsockoptICMPv6Filter", - "SetsockoptIPMreq", - "SetsockoptIPMreqn", - "SetsockoptIPv6Mreq", - "SetsockoptInet4Addr", - "SetsockoptInt", - "SetsockoptLinger", - "SetsockoptString", - "SetsockoptTimeval", - "Settimeofday", - "Setuid", - "Setxattr", - "Shutdown", - "SidTypeAlias", - "SidTypeComputer", - "SidTypeDeletedAccount", - "SidTypeDomain", - "SidTypeGroup", - "SidTypeInvalid", - "SidTypeLabel", - "SidTypeUnknown", - "SidTypeUser", - "SidTypeWellKnownGroup", - "Signal", - "SizeofBpfHdr", - "SizeofBpfInsn", - "SizeofBpfProgram", - "SizeofBpfStat", - "SizeofBpfVersion", - "SizeofBpfZbuf", - "SizeofBpfZbufHeader", - "SizeofCmsghdr", - "SizeofICMPv6Filter", - "SizeofIPMreq", - "SizeofIPMreqn", - "SizeofIPv6MTUInfo", - "SizeofIPv6Mreq", - "SizeofIfAddrmsg", - "SizeofIfAnnounceMsghdr", - "SizeofIfData", - "SizeofIfInfomsg", - "SizeofIfMsghdr", - "SizeofIfaMsghdr", - "SizeofIfmaMsghdr", - "SizeofIfmaMsghdr2", - "SizeofInet4Pktinfo", - "SizeofInet6Pktinfo", - "SizeofInotifyEvent", - "SizeofLinger", - "SizeofMsghdr", - "SizeofNlAttr", - "SizeofNlMsgerr", - "SizeofNlMsghdr", - "SizeofRtAttr", - "SizeofRtGenmsg", - "SizeofRtMetrics", - "SizeofRtMsg", - "SizeofRtMsghdr", - "SizeofRtNexthop", - "SizeofSockFilter", - "SizeofSockFprog", - "SizeofSockaddrAny", - "SizeofSockaddrDatalink", - "SizeofSockaddrInet4", - "SizeofSockaddrInet6", - "SizeofSockaddrLinklayer", - "SizeofSockaddrNetlink", - "SizeofSockaddrUnix", - "SizeofTCPInfo", - "SizeofUcred", - "SlicePtrFromStrings", - "SockFilter", - "SockFprog", - "Sockaddr", - "SockaddrDatalink", - "SockaddrGen", - "SockaddrInet4", - "SockaddrInet6", - "SockaddrLinklayer", - "SockaddrNetlink", - "SockaddrUnix", - "Socket", - "SocketControlMessage", - "SocketDisableIPv6", - "Socketpair", - "Splice", - "StartProcess", - "StartupInfo", - "Stat", - "Stat_t", - "Statfs", - "Statfs_t", - "Stderr", - "Stdin", - "Stdout", - "StringBytePtr", - "StringByteSlice", - "StringSlicePtr", - "StringToSid", - "StringToUTF16", - "StringToUTF16Ptr", - "Symlink", - "Sync", - "SyncFileRange", - "SysProcAttr", - "SysProcIDMap", - "Syscall", - "Syscall12", - "Syscall15", - "Syscall18", - "Syscall6", - "Syscall9", - "SyscallN", - "Sysctl", - "SysctlUint32", - "Sysctlnode", - "Sysinfo", - "Sysinfo_t", - "Systemtime", - "TCGETS", - "TCIFLUSH", - "TCIOFLUSH", - "TCOFLUSH", - "TCPInfo", - "TCPKeepalive", - "TCP_CA_NAME_MAX", - "TCP_CONGCTL", - "TCP_CONGESTION", - "TCP_CONNECTIONTIMEOUT", - "TCP_CORK", - "TCP_DEFER_ACCEPT", - "TCP_ENABLE_ECN", - "TCP_INFO", - "TCP_KEEPALIVE", - "TCP_KEEPCNT", - "TCP_KEEPIDLE", - "TCP_KEEPINIT", - "TCP_KEEPINTVL", - "TCP_LINGER2", - "TCP_MAXBURST", - "TCP_MAXHLEN", - "TCP_MAXOLEN", - "TCP_MAXSEG", - "TCP_MAXWIN", - "TCP_MAX_SACK", - "TCP_MAX_WINSHIFT", - "TCP_MD5SIG", - "TCP_MD5SIG_MAXKEYLEN", - "TCP_MINMSS", - "TCP_MINMSSOVERLOAD", - "TCP_MSS", - "TCP_NODELAY", - "TCP_NOOPT", - "TCP_NOPUSH", - "TCP_NOTSENT_LOWAT", - "TCP_NSTATES", - "TCP_QUICKACK", - "TCP_RXT_CONNDROPTIME", - 
"TCP_RXT_FINDROP", - "TCP_SACK_ENABLE", - "TCP_SENDMOREACKS", - "TCP_SYNCNT", - "TCP_VENDOR", - "TCP_WINDOW_CLAMP", - "TCSAFLUSH", - "TCSETS", - "TF_DISCONNECT", - "TF_REUSE_SOCKET", - "TF_USE_DEFAULT_WORKER", - "TF_USE_KERNEL_APC", - "TF_USE_SYSTEM_THREAD", - "TF_WRITE_BEHIND", - "TH32CS_INHERIT", - "TH32CS_SNAPALL", - "TH32CS_SNAPHEAPLIST", - "TH32CS_SNAPMODULE", - "TH32CS_SNAPMODULE32", - "TH32CS_SNAPPROCESS", - "TH32CS_SNAPTHREAD", - "TIME_ZONE_ID_DAYLIGHT", - "TIME_ZONE_ID_STANDARD", - "TIME_ZONE_ID_UNKNOWN", - "TIOCCBRK", - "TIOCCDTR", - "TIOCCONS", - "TIOCDCDTIMESTAMP", - "TIOCDRAIN", - "TIOCDSIMICROCODE", - "TIOCEXCL", - "TIOCEXT", - "TIOCFLAG_CDTRCTS", - "TIOCFLAG_CLOCAL", - "TIOCFLAG_CRTSCTS", - "TIOCFLAG_MDMBUF", - "TIOCFLAG_PPS", - "TIOCFLAG_SOFTCAR", - "TIOCFLUSH", - "TIOCGDEV", - "TIOCGDRAINWAIT", - "TIOCGETA", - "TIOCGETD", - "TIOCGFLAGS", - "TIOCGICOUNT", - "TIOCGLCKTRMIOS", - "TIOCGLINED", - "TIOCGPGRP", - "TIOCGPTN", - "TIOCGQSIZE", - "TIOCGRANTPT", - "TIOCGRS485", - "TIOCGSERIAL", - "TIOCGSID", - "TIOCGSIZE", - "TIOCGSOFTCAR", - "TIOCGTSTAMP", - "TIOCGWINSZ", - "TIOCINQ", - "TIOCIXOFF", - "TIOCIXON", - "TIOCLINUX", - "TIOCMBIC", - "TIOCMBIS", - "TIOCMGDTRWAIT", - "TIOCMGET", - "TIOCMIWAIT", - "TIOCMODG", - "TIOCMODS", - "TIOCMSDTRWAIT", - "TIOCMSET", - "TIOCM_CAR", - "TIOCM_CD", - "TIOCM_CTS", - "TIOCM_DCD", - "TIOCM_DSR", - "TIOCM_DTR", - "TIOCM_LE", - "TIOCM_RI", - "TIOCM_RNG", - "TIOCM_RTS", - "TIOCM_SR", - "TIOCM_ST", - "TIOCNOTTY", - "TIOCNXCL", - "TIOCOUTQ", - "TIOCPKT", - "TIOCPKT_DATA", - "TIOCPKT_DOSTOP", - "TIOCPKT_FLUSHREAD", - "TIOCPKT_FLUSHWRITE", - "TIOCPKT_IOCTL", - "TIOCPKT_NOSTOP", - "TIOCPKT_START", - "TIOCPKT_STOP", - "TIOCPTMASTER", - "TIOCPTMGET", - "TIOCPTSNAME", - "TIOCPTYGNAME", - "TIOCPTYGRANT", - "TIOCPTYUNLK", - "TIOCRCVFRAME", - "TIOCREMOTE", - "TIOCSBRK", - "TIOCSCONS", - "TIOCSCTTY", - "TIOCSDRAINWAIT", - "TIOCSDTR", - "TIOCSERCONFIG", - "TIOCSERGETLSR", - "TIOCSERGETMULTI", - "TIOCSERGSTRUCT", - "TIOCSERGWILD", - "TIOCSERSETMULTI", - "TIOCSERSWILD", - "TIOCSER_TEMT", - "TIOCSETA", - "TIOCSETAF", - "TIOCSETAW", - "TIOCSETD", - "TIOCSFLAGS", - "TIOCSIG", - "TIOCSLCKTRMIOS", - "TIOCSLINED", - "TIOCSPGRP", - "TIOCSPTLCK", - "TIOCSQSIZE", - "TIOCSRS485", - "TIOCSSERIAL", - "TIOCSSIZE", - "TIOCSSOFTCAR", - "TIOCSTART", - "TIOCSTAT", - "TIOCSTI", - "TIOCSTOP", - "TIOCSTSTAMP", - "TIOCSWINSZ", - "TIOCTIMESTAMP", - "TIOCUCNTL", - "TIOCVHANGUP", - "TIOCXMTFRAME", - "TOKEN_ADJUST_DEFAULT", - "TOKEN_ADJUST_GROUPS", - "TOKEN_ADJUST_PRIVILEGES", - "TOKEN_ADJUST_SESSIONID", - "TOKEN_ALL_ACCESS", - "TOKEN_ASSIGN_PRIMARY", - "TOKEN_DUPLICATE", - "TOKEN_EXECUTE", - "TOKEN_IMPERSONATE", - "TOKEN_QUERY", - "TOKEN_QUERY_SOURCE", - "TOKEN_READ", - "TOKEN_WRITE", - "TOSTOP", - "TRUNCATE_EXISTING", - "TUNATTACHFILTER", - "TUNDETACHFILTER", - "TUNGETFEATURES", - "TUNGETIFF", - "TUNGETSNDBUF", - "TUNGETVNETHDRSZ", - "TUNSETDEBUG", - "TUNSETGROUP", - "TUNSETIFF", - "TUNSETLINK", - "TUNSETNOCSUM", - "TUNSETOFFLOAD", - "TUNSETOWNER", - "TUNSETPERSIST", - "TUNSETSNDBUF", - "TUNSETTXFILTER", - "TUNSETVNETHDRSZ", - "Tee", - "TerminateProcess", - "Termios", - "Tgkill", - "Time", - "Time_t", - "Times", - "Timespec", - "TimespecToNsec", - "Timeval", - "Timeval32", - "TimevalToNsec", - "Timex", - "Timezoneinformation", - "Tms", - "Token", - "TokenAccessInformation", - "TokenAuditPolicy", - "TokenDefaultDacl", - "TokenElevation", - "TokenElevationType", - "TokenGroups", - "TokenGroupsAndPrivileges", - "TokenHasRestrictions", - "TokenImpersonationLevel", - 
"TokenIntegrityLevel", - "TokenLinkedToken", - "TokenLogonSid", - "TokenMandatoryPolicy", - "TokenOrigin", - "TokenOwner", - "TokenPrimaryGroup", - "TokenPrivileges", - "TokenRestrictedSids", - "TokenSandBoxInert", - "TokenSessionId", - "TokenSessionReference", - "TokenSource", - "TokenStatistics", - "TokenType", - "TokenUIAccess", - "TokenUser", - "TokenVirtualizationAllowed", - "TokenVirtualizationEnabled", - "Tokenprimarygroup", - "Tokenuser", - "TranslateAccountName", - "TranslateName", - "TransmitFile", - "TransmitFileBuffers", - "Truncate", - "UNIX_PATH_MAX", - "USAGE_MATCH_TYPE_AND", - "USAGE_MATCH_TYPE_OR", - "UTF16FromString", - "UTF16PtrFromString", - "UTF16ToString", - "Ucred", - "Umask", - "Uname", - "Undelete", - "UnixCredentials", - "UnixRights", - "Unlink", - "Unlinkat", - "UnmapViewOfFile", - "Unmount", - "Unsetenv", - "Unshare", - "UserInfo10", - "Ustat", - "Ustat_t", - "Utimbuf", - "Utime", - "Utimes", - "UtimesNano", - "Utsname", - "VDISCARD", - "VDSUSP", - "VEOF", - "VEOL", - "VEOL2", - "VERASE", - "VERASE2", - "VINTR", - "VKILL", - "VLNEXT", - "VMIN", - "VQUIT", - "VREPRINT", - "VSTART", - "VSTATUS", - "VSTOP", - "VSUSP", - "VSWTC", - "VT0", - "VT1", - "VTDLY", - "VTIME", - "VWERASE", - "VirtualLock", - "VirtualUnlock", - "WAIT_ABANDONED", - "WAIT_FAILED", - "WAIT_OBJECT_0", - "WAIT_TIMEOUT", - "WALL", - "WALLSIG", - "WALTSIG", - "WCLONE", - "WCONTINUED", - "WCOREFLAG", - "WEXITED", - "WLINUXCLONE", - "WNOHANG", - "WNOTHREAD", - "WNOWAIT", - "WNOZOMBIE", - "WOPTSCHECKED", - "WORDSIZE", - "WSABuf", - "WSACleanup", - "WSADESCRIPTION_LEN", - "WSAData", - "WSAEACCES", - "WSAECONNABORTED", - "WSAECONNRESET", - "WSAEnumProtocols", - "WSAID_CONNECTEX", - "WSAIoctl", - "WSAPROTOCOL_LEN", - "WSAProtocolChain", - "WSAProtocolInfo", - "WSARecv", - "WSARecvFrom", - "WSASYS_STATUS_LEN", - "WSASend", - "WSASendTo", - "WSASendto", - "WSAStartup", - "WSTOPPED", - "WTRAPPED", - "WUNTRACED", - "Wait4", - "WaitForSingleObject", - "WaitStatus", - "Win32FileAttributeData", - "Win32finddata", - "Write", - "WriteConsole", - "WriteFile", - "X509_ASN_ENCODING", - "XCASE", - "XP1_CONNECTIONLESS", - "XP1_CONNECT_DATA", - "XP1_DISCONNECT_DATA", - "XP1_EXPEDITED_DATA", - "XP1_GRACEFUL_CLOSE", - "XP1_GUARANTEED_DELIVERY", - "XP1_GUARANTEED_ORDER", - "XP1_IFS_HANDLES", - "XP1_MESSAGE_ORIENTED", - "XP1_MULTIPOINT_CONTROL_PLANE", - "XP1_MULTIPOINT_DATA_PLANE", - "XP1_PARTIAL_MESSAGE", - "XP1_PSEUDO_STREAM", - "XP1_QOS_SUPPORTED", - "XP1_SAN_SUPPORT_SDP", - "XP1_SUPPORT_BROADCAST", - "XP1_SUPPORT_MULTIPOINT", - "XP1_UNI_RECV", - "XP1_UNI_SEND", - }, - "syscall/js": { - "CopyBytesToGo", - "CopyBytesToJS", - "Error", - "Func", - "FuncOf", - "Global", - "Null", - "Type", - "TypeBoolean", - "TypeFunction", - "TypeNull", - "TypeNumber", - "TypeObject", - "TypeString", - "TypeSymbol", - "TypeUndefined", - "Undefined", - "Value", - "ValueError", - "ValueOf", - }, - "testing": { - "AllocsPerRun", - "B", - "Benchmark", - "BenchmarkResult", - "Cover", - "CoverBlock", - "CoverMode", - "Coverage", - "F", - "Init", - "InternalBenchmark", - "InternalExample", - "InternalFuzzTarget", - "InternalTest", - "M", - "Main", - "MainStart", - "PB", - "RegisterCover", - "RunBenchmarks", - "RunExamples", - "RunTests", - "Short", - "T", - "TB", - "Testing", - "Verbose", - }, - "testing/fstest": { - "MapFS", - "MapFile", - "TestFS", - }, - "testing/iotest": { - "DataErrReader", - "ErrReader", - "ErrTimeout", - "HalfReader", - "NewReadLogger", - "NewWriteLogger", - "OneByteReader", - "TestReader", - "TimeoutReader", - 
"TruncateWriter", - }, - "testing/quick": { - "Check", - "CheckEqual", - "CheckEqualError", - "CheckError", - "Config", - "Generator", - "SetupError", - "Value", - }, - "testing/slogtest": { - "TestHandler", - }, - "text/scanner": { - "Char", - "Comment", - "EOF", - "Float", - "GoTokens", - "GoWhitespace", - "Ident", - "Int", - "Position", - "RawString", - "ScanChars", - "ScanComments", - "ScanFloats", - "ScanIdents", - "ScanInts", - "ScanRawStrings", - "ScanStrings", - "Scanner", - "SkipComments", - "String", - "TokenString", - }, - "text/tabwriter": { - "AlignRight", - "Debug", - "DiscardEmptyColumns", - "Escape", - "FilterHTML", - "NewWriter", - "StripEscape", - "TabIndent", - "Writer", - }, - "text/template": { - "ExecError", - "FuncMap", - "HTMLEscape", - "HTMLEscapeString", - "HTMLEscaper", - "IsTrue", - "JSEscape", - "JSEscapeString", - "JSEscaper", - "Must", - "New", - "ParseFS", - "ParseFiles", - "ParseGlob", - "Template", - "URLQueryEscaper", - }, - "text/template/parse": { - "ActionNode", - "BoolNode", - "BranchNode", - "BreakNode", - "ChainNode", - "CommandNode", - "CommentNode", - "ContinueNode", - "DotNode", - "FieldNode", - "IdentifierNode", - "IfNode", - "IsEmptyTree", - "ListNode", - "Mode", - "New", - "NewIdentifier", - "NilNode", - "Node", - "NodeAction", - "NodeBool", - "NodeBreak", - "NodeChain", - "NodeCommand", - "NodeComment", - "NodeContinue", - "NodeDot", - "NodeField", - "NodeIdentifier", - "NodeIf", - "NodeList", - "NodeNil", - "NodeNumber", - "NodePipe", - "NodeRange", - "NodeString", - "NodeTemplate", - "NodeText", - "NodeType", - "NodeVariable", - "NodeWith", - "NumberNode", - "Parse", - "ParseComments", - "PipeNode", - "Pos", - "RangeNode", - "SkipFuncCheck", - "StringNode", - "TemplateNode", - "TextNode", - "Tree", - "VariableNode", - "WithNode", - }, - "time": { - "ANSIC", - "After", - "AfterFunc", - "April", - "August", - "Date", - "DateOnly", - "DateTime", - "December", - "Duration", - "February", - "FixedZone", - "Friday", - "Hour", - "January", - "July", - "June", - "Kitchen", - "Layout", - "LoadLocation", - "LoadLocationFromTZData", - "Local", - "Location", - "March", - "May", - "Microsecond", - "Millisecond", - "Minute", - "Monday", - "Month", - "Nanosecond", - "NewTicker", - "NewTimer", - "November", - "Now", - "October", - "Parse", - "ParseDuration", - "ParseError", - "ParseInLocation", - "RFC1123", - "RFC1123Z", - "RFC3339", - "RFC3339Nano", - "RFC822", - "RFC822Z", - "RFC850", - "RubyDate", - "Saturday", - "Second", - "September", - "Since", - "Sleep", - "Stamp", - "StampMicro", - "StampMilli", - "StampNano", - "Sunday", - "Thursday", - "Tick", - "Ticker", - "Time", - "TimeOnly", - "Timer", - "Tuesday", - "UTC", - "Unix", - "UnixDate", - "UnixMicro", - "UnixMilli", - "Until", - "Wednesday", - "Weekday", - }, - "unicode": { - "ASCII_Hex_Digit", - "Adlam", - "Ahom", - "Anatolian_Hieroglyphs", - "Arabic", - "Armenian", - "Avestan", - "AzeriCase", - "Balinese", - "Bamum", - "Bassa_Vah", - "Batak", - "Bengali", - "Bhaiksuki", - "Bidi_Control", - "Bopomofo", - "Brahmi", - "Braille", - "Buginese", - "Buhid", - "C", - "Canadian_Aboriginal", - "Carian", - "CaseRange", - "CaseRanges", - "Categories", - "Caucasian_Albanian", - "Cc", - "Cf", - "Chakma", - "Cham", - "Cherokee", - "Chorasmian", - "Co", - "Common", - "Coptic", - "Cs", - "Cuneiform", - "Cypriot", - "Cypro_Minoan", - "Cyrillic", - "Dash", - "Deprecated", - "Deseret", - "Devanagari", - "Diacritic", - "Digit", - "Dives_Akuru", - "Dogra", - "Duployan", - "Egyptian_Hieroglyphs", - "Elbasan", - 
"Elymaic", - "Ethiopic", - "Extender", - "FoldCategory", - "FoldScript", - "Georgian", - "Glagolitic", - "Gothic", - "Grantha", - "GraphicRanges", - "Greek", - "Gujarati", - "Gunjala_Gondi", - "Gurmukhi", - "Han", - "Hangul", - "Hanifi_Rohingya", - "Hanunoo", - "Hatran", - "Hebrew", - "Hex_Digit", - "Hiragana", - "Hyphen", - "IDS_Binary_Operator", - "IDS_Trinary_Operator", - "Ideographic", - "Imperial_Aramaic", - "In", - "Inherited", - "Inscriptional_Pahlavi", - "Inscriptional_Parthian", - "Is", - "IsControl", - "IsDigit", - "IsGraphic", - "IsLetter", - "IsLower", - "IsMark", - "IsNumber", - "IsOneOf", - "IsPrint", - "IsPunct", - "IsSpace", - "IsSymbol", - "IsTitle", - "IsUpper", - "Javanese", - "Join_Control", - "Kaithi", - "Kannada", - "Katakana", - "Kawi", - "Kayah_Li", - "Kharoshthi", - "Khitan_Small_Script", - "Khmer", - "Khojki", - "Khudawadi", - "L", - "Lao", - "Latin", - "Lepcha", - "Letter", - "Limbu", - "Linear_A", - "Linear_B", - "Lisu", - "Ll", - "Lm", - "Lo", - "Logical_Order_Exception", - "Lower", - "LowerCase", - "Lt", - "Lu", - "Lycian", - "Lydian", - "M", - "Mahajani", - "Makasar", - "Malayalam", - "Mandaic", - "Manichaean", - "Marchen", - "Mark", - "Masaram_Gondi", - "MaxASCII", - "MaxCase", - "MaxLatin1", - "MaxRune", - "Mc", - "Me", - "Medefaidrin", - "Meetei_Mayek", - "Mende_Kikakui", - "Meroitic_Cursive", - "Meroitic_Hieroglyphs", - "Miao", - "Mn", - "Modi", - "Mongolian", - "Mro", - "Multani", - "Myanmar", - "N", - "Nabataean", - "Nag_Mundari", - "Nandinagari", - "Nd", - "New_Tai_Lue", - "Newa", - "Nko", - "Nl", - "No", - "Noncharacter_Code_Point", - "Number", - "Nushu", - "Nyiakeng_Puachue_Hmong", - "Ogham", - "Ol_Chiki", - "Old_Hungarian", - "Old_Italic", - "Old_North_Arabian", - "Old_Permic", - "Old_Persian", - "Old_Sogdian", - "Old_South_Arabian", - "Old_Turkic", - "Old_Uyghur", - "Oriya", - "Osage", - "Osmanya", - "Other", - "Other_Alphabetic", - "Other_Default_Ignorable_Code_Point", - "Other_Grapheme_Extend", - "Other_ID_Continue", - "Other_ID_Start", - "Other_Lowercase", - "Other_Math", - "Other_Uppercase", - "P", - "Pahawh_Hmong", - "Palmyrene", - "Pattern_Syntax", - "Pattern_White_Space", - "Pau_Cin_Hau", - "Pc", - "Pd", - "Pe", - "Pf", - "Phags_Pa", - "Phoenician", - "Pi", - "Po", - "Prepended_Concatenation_Mark", - "PrintRanges", - "Properties", - "Ps", - "Psalter_Pahlavi", - "Punct", - "Quotation_Mark", - "Radical", - "Range16", - "Range32", - "RangeTable", - "Regional_Indicator", - "Rejang", - "ReplacementChar", - "Runic", - "S", - "STerm", - "Samaritan", - "Saurashtra", - "Sc", - "Scripts", - "Sentence_Terminal", - "Sharada", - "Shavian", - "Siddham", - "SignWriting", - "SimpleFold", - "Sinhala", - "Sk", - "Sm", - "So", - "Soft_Dotted", - "Sogdian", - "Sora_Sompeng", - "Soyombo", - "Space", - "SpecialCase", - "Sundanese", - "Syloti_Nagri", - "Symbol", - "Syriac", - "Tagalog", - "Tagbanwa", - "Tai_Le", - "Tai_Tham", - "Tai_Viet", - "Takri", - "Tamil", - "Tangsa", - "Tangut", - "Telugu", - "Terminal_Punctuation", - "Thaana", - "Thai", - "Tibetan", - "Tifinagh", - "Tirhuta", - "Title", - "TitleCase", - "To", - "ToLower", - "ToTitle", - "ToUpper", - "Toto", - "TurkishCase", - "Ugaritic", - "Unified_Ideograph", - "Upper", - "UpperCase", - "UpperLower", - "Vai", - "Variation_Selector", - "Version", - "Vithkuqi", - "Wancho", - "Warang_Citi", - "White_Space", - "Yezidi", - "Yi", - "Z", - "Zanabazar_Square", - "Zl", - "Zp", - "Zs", - }, - "unicode/utf16": { - "AppendRune", - "Decode", - "DecodeRune", - "Encode", - "EncodeRune", - "IsSurrogate", - }, - 
"unicode/utf8": { - "AppendRune", - "DecodeLastRune", - "DecodeLastRuneInString", - "DecodeRune", - "DecodeRuneInString", - "EncodeRune", - "FullRune", - "FullRuneInString", - "MaxRune", - "RuneCount", - "RuneCountInString", - "RuneError", - "RuneLen", - "RuneSelf", - "RuneStart", - "UTFMax", - "Valid", - "ValidRune", - "ValidString", - }, - "unsafe": { - "Add", - "Alignof", - "Offsetof", - "Pointer", - "Sizeof", - "Slice", - "SliceData", - "String", - "StringData", - }, -} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index d9950b1f..44719de1 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -import ( - "golang.org/x/tools/internal/gocommand" -) - var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } @@ -18,10 +14,6 @@ type PackageError struct { Err string // the error itself } -var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } - -var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} - var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var ForTest int // must be set as a LoadMode to call GetForTest diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index b92e8e6e..2acd8585 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -23,6 +23,9 @@ type PkgDecoder struct { // version is the file format version. version uint32 + // aliases determines whether types.Aliases should be created + aliases bool + // sync indicates whether the file uses sync markers. sync bool @@ -73,6 +76,7 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, + //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go new file mode 100644 index 00000000..fd689207 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -0,0 +1,17320 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +var PackageSymbols = map[string][]Symbol{ + "archive/tar": { + {"(*Header).FileInfo", Method, 1}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteHeader", Method, 0}, + {"(Format).String", Method, 10}, + {"ErrFieldTooLong", Var, 0}, + {"ErrHeader", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"ErrWriteAfterClose", Var, 0}, + {"ErrWriteTooLong", Var, 0}, + {"FileInfoHeader", Func, 1}, + {"Format", Type, 10}, + {"FormatGNU", Const, 10}, + {"FormatPAX", Const, 10}, + {"FormatUSTAR", Const, 10}, + {"FormatUnknown", Const, 10}, + {"Header", Type, 0}, + {"Header.AccessTime", Field, 0}, + {"Header.ChangeTime", Field, 0}, + {"Header.Devmajor", Field, 0}, + {"Header.Devminor", Field, 0}, + {"Header.Format", Field, 10}, + {"Header.Gid", Field, 0}, + {"Header.Gname", Field, 0}, + {"Header.Linkname", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Mode", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.PAXRecords", Field, 10}, + {"Header.Size", Field, 0}, + {"Header.Typeflag", Field, 0}, + {"Header.Uid", Field, 0}, + {"Header.Uname", Field, 0}, + {"Header.Xattrs", Field, 3}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Reader", Type, 0}, + {"TypeBlock", Const, 0}, + {"TypeChar", Const, 0}, + {"TypeCont", Const, 0}, + {"TypeDir", Const, 0}, + {"TypeFifo", Const, 0}, + {"TypeGNULongLink", Const, 1}, + {"TypeGNULongName", Const, 1}, + {"TypeGNUSparse", Const, 3}, + {"TypeLink", Const, 0}, + {"TypeReg", Const, 0}, + {"TypeRegA", Const, 0}, + {"TypeSymlink", Const, 0}, + {"TypeXGlobalHeader", Const, 0}, + {"TypeXHeader", Const, 0}, + {"Writer", Type, 0}, + }, + "archive/zip": { + {"(*File).DataOffset", Method, 2}, + {"(*File).FileInfo", Method, 0}, + {"(*File).ModTime", Method, 0}, + {"(*File).Mode", Method, 0}, + {"(*File).Open", Method, 0}, + {"(*File).OpenRaw", Method, 17}, + {"(*File).SetModTime", Method, 0}, + {"(*File).SetMode", Method, 0}, + {"(*FileHeader).FileInfo", Method, 0}, + {"(*FileHeader).ModTime", Method, 0}, + {"(*FileHeader).Mode", Method, 0}, + {"(*FileHeader).SetModTime", Method, 0}, + {"(*FileHeader).SetMode", Method, 0}, + {"(*ReadCloser).Close", Method, 0}, + {"(*ReadCloser).Open", Method, 16}, + {"(*ReadCloser).RegisterDecompressor", Method, 6}, + {"(*Reader).Open", Method, 16}, + {"(*Reader).RegisterDecompressor", Method, 6}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Copy", Method, 17}, + {"(*Writer).Create", Method, 0}, + {"(*Writer).CreateHeader", Method, 0}, + {"(*Writer).CreateRaw", Method, 17}, + {"(*Writer).Flush", Method, 4}, + {"(*Writer).RegisterCompressor", Method, 6}, + {"(*Writer).SetComment", Method, 10}, + {"(*Writer).SetOffset", Method, 5}, + {"Compressor", Type, 2}, + {"Decompressor", Type, 2}, + {"Deflate", Const, 0}, + {"ErrAlgorithm", Var, 0}, + {"ErrChecksum", Var, 0}, + {"ErrFormat", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.CRC32", Field, 0}, + {"FileHeader.Comment", Field, 0}, + {"FileHeader.CompressedSize", Field, 0}, + {"FileHeader.CompressedSize64", Field, 1}, + {"FileHeader.CreatorVersion", Field, 0}, + {"FileHeader.ExternalAttrs", Field, 0}, + {"FileHeader.Extra", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Method", Field, 0}, + {"FileHeader.Modified", Field, 10}, + {"FileHeader.ModifiedDate", 
Field, 0}, + {"FileHeader.ModifiedTime", Field, 0}, + {"FileHeader.Name", Field, 0}, + {"FileHeader.NonUTF8", Field, 10}, + {"FileHeader.ReaderVersion", Field, 0}, + {"FileHeader.UncompressedSize", Field, 0}, + {"FileHeader.UncompressedSize64", Field, 1}, + {"FileInfoHeader", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"OpenReader", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadCloser.Reader", Field, 0}, + {"Reader", Type, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.File", Field, 0}, + {"RegisterCompressor", Func, 2}, + {"RegisterDecompressor", Func, 2}, + {"Store", Const, 0}, + {"Writer", Type, 0}, + }, + "bufio": { + {"(*Reader).Buffered", Method, 0}, + {"(*Reader).Discard", Method, 5}, + {"(*Reader).Peek", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadBytes", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).ReadSlice", Method, 0}, + {"(*Reader).ReadString", Method, 0}, + {"(*Reader).Reset", Method, 2}, + {"(*Reader).Size", Method, 10}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Scanner).Buffer", Method, 6}, + {"(*Scanner).Bytes", Method, 1}, + {"(*Scanner).Err", Method, 1}, + {"(*Scanner).Scan", Method, 1}, + {"(*Scanner).Split", Method, 1}, + {"(*Scanner).Text", Method, 1}, + {"(*Writer).Available", Method, 0}, + {"(*Writer).AvailableBuffer", Method, 18}, + {"(*Writer).Buffered", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).ReadFrom", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Size", Method, 10}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteByte", Method, 0}, + {"(*Writer).WriteRune", Method, 0}, + {"(*Writer).WriteString", Method, 0}, + {"(ReadWriter).Available", Method, 0}, + {"(ReadWriter).AvailableBuffer", Method, 18}, + {"(ReadWriter).Discard", Method, 5}, + {"(ReadWriter).Flush", Method, 0}, + {"(ReadWriter).Peek", Method, 0}, + {"(ReadWriter).Read", Method, 0}, + {"(ReadWriter).ReadByte", Method, 0}, + {"(ReadWriter).ReadBytes", Method, 0}, + {"(ReadWriter).ReadFrom", Method, 1}, + {"(ReadWriter).ReadLine", Method, 0}, + {"(ReadWriter).ReadRune", Method, 0}, + {"(ReadWriter).ReadSlice", Method, 0}, + {"(ReadWriter).ReadString", Method, 0}, + {"(ReadWriter).UnreadByte", Method, 0}, + {"(ReadWriter).UnreadRune", Method, 0}, + {"(ReadWriter).Write", Method, 0}, + {"(ReadWriter).WriteByte", Method, 0}, + {"(ReadWriter).WriteRune", Method, 0}, + {"(ReadWriter).WriteString", Method, 0}, + {"(ReadWriter).WriteTo", Method, 1}, + {"ErrAdvanceTooFar", Var, 1}, + {"ErrBadReadCount", Var, 15}, + {"ErrBufferFull", Var, 0}, + {"ErrFinalToken", Var, 6}, + {"ErrInvalidUnreadByte", Var, 0}, + {"ErrInvalidUnreadRune", Var, 0}, + {"ErrNegativeAdvance", Var, 1}, + {"ErrNegativeCount", Var, 0}, + {"ErrTooLong", Var, 1}, + {"MaxScanTokenSize", Const, 1}, + {"NewReadWriter", Func, 0}, + {"NewReader", Func, 0}, + {"NewReaderSize", Func, 0}, + {"NewScanner", Func, 1}, + {"NewWriter", Func, 0}, + {"NewWriterSize", Func, 0}, + {"ReadWriter", Type, 0}, + {"ReadWriter.Reader", Field, 0}, + {"ReadWriter.Writer", Field, 0}, + {"Reader", Type, 0}, + {"ScanBytes", Func, 1}, + {"ScanLines", Func, 1}, + {"ScanRunes", Func, 1}, + {"ScanWords", Func, 1}, + {"Scanner", Type, 1}, + {"SplitFunc", Type, 1}, + {"Writer", Type, 0}, + }, + "bytes": { + {"(*Buffer).Available", Method, 21}, + {"(*Buffer).AvailableBuffer", Method, 21}, + {"(*Buffer).Bytes", Method, 0}, + 
{"(*Buffer).Cap", Method, 5}, + {"(*Buffer).Grow", Method, 1}, + {"(*Buffer).Len", Method, 0}, + {"(*Buffer).Next", Method, 0}, + {"(*Buffer).Read", Method, 0}, + {"(*Buffer).ReadByte", Method, 0}, + {"(*Buffer).ReadBytes", Method, 0}, + {"(*Buffer).ReadFrom", Method, 0}, + {"(*Buffer).ReadRune", Method, 0}, + {"(*Buffer).ReadString", Method, 0}, + {"(*Buffer).Reset", Method, 0}, + {"(*Buffer).String", Method, 0}, + {"(*Buffer).Truncate", Method, 0}, + {"(*Buffer).UnreadByte", Method, 0}, + {"(*Buffer).UnreadRune", Method, 0}, + {"(*Buffer).Write", Method, 0}, + {"(*Buffer).WriteByte", Method, 0}, + {"(*Buffer).WriteRune", Method, 0}, + {"(*Buffer).WriteString", Method, 0}, + {"(*Buffer).WriteTo", Method, 0}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"Buffer", Type, 0}, + {"Clone", Func, 20}, + {"Compare", Func, 0}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 7}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 7}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"Equal", Func, 0}, + {"EqualFold", Func, 0}, + {"ErrTooLarge", Var, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 0}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"MinRead", Const, 0}, + {"NewBuffer", Func, 0}, + {"NewBufferString", Func, 0}, + {"NewReader", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Runes", Func, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "cmp": { + {"Compare", Func, 21}, + {"Less", Func, 21}, + {"Or", Func, 22}, + {"Ordered", Type, 21}, + }, + "compress/bzip2": { + {"(StructuralError).Error", Method, 0}, + {"NewReader", Func, 0}, + {"StructuralError", Type, 0}, + }, + "compress/flate": { + {"(*ReadError).Error", Method, 0}, + {"(*WriteError).Error", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(InternalError).Error", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"CorruptInputError", Type, 0}, + {"DefaultCompression", Const, 0}, + {"HuffmanOnly", Const, 7}, + {"InternalError", Type, 0}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterDict", Func, 0}, + {"NoCompression", Const, 0}, + {"ReadError", Type, 0}, + 
{"ReadError.Err", Field, 0}, + {"ReadError.Offset", Field, 0}, + {"Reader", Type, 0}, + {"Resetter", Type, 4}, + {"WriteError", Type, 0}, + {"WriteError.Err", Field, 0}, + {"WriteError.Offset", Field, 0}, + {"Writer", Type, 0}, + }, + "compress/gzip": { + {"(*Reader).Close", Method, 0}, + {"(*Reader).Multistream", Method, 4}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).Reset", Method, 3}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrHeader", Var, 0}, + {"Header", Type, 0}, + {"Header.Comment", Field, 0}, + {"Header.Extra", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.OS", Field, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NoCompression", Const, 0}, + {"Reader", Type, 0}, + {"Reader.Header", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Header", Field, 0}, + }, + "compress/lzw": { + {"(*Reader).Close", Method, 17}, + {"(*Reader).Read", Method, 17}, + {"(*Reader).Reset", Method, 17}, + {"(*Writer).Close", Method, 17}, + {"(*Writer).Reset", Method, 17}, + {"(*Writer).Write", Method, 17}, + {"LSB", Const, 0}, + {"MSB", Const, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Order", Type, 0}, + {"Reader", Type, 17}, + {"Writer", Type, 17}, + }, + "compress/zlib": { + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrDictionary", Var, 0}, + {"ErrHeader", Var, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NewWriterLevelDict", Func, 0}, + {"NoCompression", Const, 0}, + {"Resetter", Type, 4}, + {"Writer", Type, 0}, + }, + "container/heap": { + {"Fix", Func, 2}, + {"Init", Func, 0}, + {"Interface", Type, 0}, + {"Pop", Func, 0}, + {"Push", Func, 0}, + {"Remove", Func, 0}, + }, + "container/list": { + {"(*Element).Next", Method, 0}, + {"(*Element).Prev", Method, 0}, + {"(*List).Back", Method, 0}, + {"(*List).Front", Method, 0}, + {"(*List).Init", Method, 0}, + {"(*List).InsertAfter", Method, 0}, + {"(*List).InsertBefore", Method, 0}, + {"(*List).Len", Method, 0}, + {"(*List).MoveAfter", Method, 2}, + {"(*List).MoveBefore", Method, 2}, + {"(*List).MoveToBack", Method, 0}, + {"(*List).MoveToFront", Method, 0}, + {"(*List).PushBack", Method, 0}, + {"(*List).PushBackList", Method, 0}, + {"(*List).PushFront", Method, 0}, + {"(*List).PushFrontList", Method, 0}, + {"(*List).Remove", Method, 0}, + {"Element", Type, 0}, + {"Element.Value", Field, 0}, + {"List", Type, 0}, + {"New", Func, 0}, + }, + "container/ring": { + {"(*Ring).Do", Method, 0}, + {"(*Ring).Len", Method, 0}, + {"(*Ring).Link", Method, 0}, + {"(*Ring).Move", Method, 0}, + {"(*Ring).Next", Method, 0}, + {"(*Ring).Prev", Method, 0}, + {"(*Ring).Unlink", Method, 0}, + {"New", Func, 0}, + {"Ring", Type, 0}, + {"Ring.Value", Field, 0}, + }, + "context": { + {"AfterFunc", Func, 21}, + {"Background", Func, 7}, + {"CancelCauseFunc", Type, 20}, + {"CancelFunc", Type, 7}, + {"Canceled", Var, 7}, + {"Cause", Func, 20}, + {"Context", Type, 7}, + {"DeadlineExceeded", Var, 7}, + {"TODO", Func, 7}, + 
{"WithCancel", Func, 7}, + {"WithCancelCause", Func, 20}, + {"WithDeadline", Func, 7}, + {"WithDeadlineCause", Func, 21}, + {"WithTimeout", Func, 7}, + {"WithTimeoutCause", Func, 21}, + {"WithValue", Func, 7}, + {"WithoutCancel", Func, 21}, + }, + "crypto": { + {"(Hash).Available", Method, 0}, + {"(Hash).HashFunc", Method, 4}, + {"(Hash).New", Method, 0}, + {"(Hash).Size", Method, 0}, + {"(Hash).String", Method, 15}, + {"BLAKE2b_256", Const, 9}, + {"BLAKE2b_384", Const, 9}, + {"BLAKE2b_512", Const, 9}, + {"BLAKE2s_256", Const, 9}, + {"Decrypter", Type, 5}, + {"DecrypterOpts", Type, 5}, + {"Hash", Type, 0}, + {"MD4", Const, 0}, + {"MD5", Const, 0}, + {"MD5SHA1", Const, 0}, + {"PrivateKey", Type, 0}, + {"PublicKey", Type, 2}, + {"RIPEMD160", Const, 0}, + {"RegisterHash", Func, 0}, + {"SHA1", Const, 0}, + {"SHA224", Const, 0}, + {"SHA256", Const, 0}, + {"SHA384", Const, 0}, + {"SHA3_224", Const, 4}, + {"SHA3_256", Const, 4}, + {"SHA3_384", Const, 4}, + {"SHA3_512", Const, 4}, + {"SHA512", Const, 0}, + {"SHA512_224", Const, 5}, + {"SHA512_256", Const, 5}, + {"Signer", Type, 4}, + {"SignerOpts", Type, 4}, + }, + "crypto/aes": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/cipher": { + {"(StreamReader).Read", Method, 0}, + {"(StreamWriter).Close", Method, 0}, + {"(StreamWriter).Write", Method, 0}, + {"AEAD", Type, 2}, + {"Block", Type, 0}, + {"BlockMode", Type, 0}, + {"NewCBCDecrypter", Func, 0}, + {"NewCBCEncrypter", Func, 0}, + {"NewCFBDecrypter", Func, 0}, + {"NewCFBEncrypter", Func, 0}, + {"NewCTR", Func, 0}, + {"NewGCM", Func, 2}, + {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithTagSize", Func, 11}, + {"NewOFB", Func, 0}, + {"Stream", Type, 0}, + {"StreamReader", Type, 0}, + {"StreamReader.R", Field, 0}, + {"StreamReader.S", Field, 0}, + {"StreamWriter", Type, 0}, + {"StreamWriter.Err", Field, 0}, + {"StreamWriter.S", Field, 0}, + {"StreamWriter.W", Field, 0}, + }, + "crypto/des": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + {"NewTripleDESCipher", Func, 0}, + }, + "crypto/dsa": { + {"ErrInvalidPublicKey", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateParameters", Func, 0}, + {"L1024N160", Const, 0}, + {"L2048N224", Const, 0}, + {"L2048N256", Const, 0}, + {"L3072N256", Const, 0}, + {"ParameterSizes", Type, 0}, + {"Parameters", Type, 0}, + {"Parameters.G", Field, 0}, + {"Parameters.P", Field, 0}, + {"Parameters.Q", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PrivateKey.X", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Parameters", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"Verify", Func, 0}, + }, + "crypto/ecdh": { + {"(*PrivateKey).Bytes", Method, 20}, + {"(*PrivateKey).Curve", Method, 20}, + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 20}, + {"(*PrivateKey).Public", Method, 20}, + {"(*PrivateKey).PublicKey", Method, 20}, + {"(*PublicKey).Bytes", Method, 20}, + {"(*PublicKey).Curve", Method, 20}, + {"(*PublicKey).Equal", Method, 20}, + {"Curve", Type, 20}, + {"P256", Func, 20}, + {"P384", Func, 20}, + {"P521", Func, 20}, + {"PrivateKey", Type, 20}, + {"PublicKey", Type, 20}, + {"X25519", Func, 20}, + }, + "crypto/ecdsa": { + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PublicKey).ECDH", Method, 20}, + 
{"(*PublicKey).Equal", Method, 15}, + {"(PrivateKey).Add", Method, 0}, + {"(PrivateKey).Double", Method, 0}, + {"(PrivateKey).IsOnCurve", Method, 0}, + {"(PrivateKey).Params", Method, 0}, + {"(PrivateKey).ScalarBaseMult", Method, 0}, + {"(PrivateKey).ScalarMult", Method, 0}, + {"(PublicKey).Add", Method, 0}, + {"(PublicKey).Double", Method, 0}, + {"(PublicKey).IsOnCurve", Method, 0}, + {"(PublicKey).Params", Method, 0}, + {"(PublicKey).ScalarBaseMult", Method, 0}, + {"(PublicKey).ScalarMult", Method, 0}, + {"GenerateKey", Func, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Curve", Field, 0}, + {"PublicKey.X", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"SignASN1", Func, 15}, + {"Verify", Func, 0}, + {"VerifyASN1", Func, 15}, + }, + "crypto/ed25519": { + {"(*Options).HashFunc", Method, 20}, + {"(PrivateKey).Equal", Method, 15}, + {"(PrivateKey).Public", Method, 13}, + {"(PrivateKey).Seed", Method, 13}, + {"(PrivateKey).Sign", Method, 13}, + {"(PublicKey).Equal", Method, 15}, + {"GenerateKey", Func, 13}, + {"NewKeyFromSeed", Func, 13}, + {"Options", Type, 20}, + {"Options.Context", Field, 20}, + {"Options.Hash", Field, 20}, + {"PrivateKey", Type, 13}, + {"PrivateKeySize", Const, 13}, + {"PublicKey", Type, 13}, + {"PublicKeySize", Const, 13}, + {"SeedSize", Const, 13}, + {"Sign", Func, 13}, + {"SignatureSize", Const, 13}, + {"Verify", Func, 13}, + {"VerifyWithOptions", Func, 20}, + }, + "crypto/elliptic": { + {"(*CurveParams).Add", Method, 0}, + {"(*CurveParams).Double", Method, 0}, + {"(*CurveParams).IsOnCurve", Method, 0}, + {"(*CurveParams).Params", Method, 0}, + {"(*CurveParams).ScalarBaseMult", Method, 0}, + {"(*CurveParams).ScalarMult", Method, 0}, + {"Curve", Type, 0}, + {"CurveParams", Type, 0}, + {"CurveParams.B", Field, 0}, + {"CurveParams.BitSize", Field, 0}, + {"CurveParams.Gx", Field, 0}, + {"CurveParams.Gy", Field, 0}, + {"CurveParams.N", Field, 0}, + {"CurveParams.Name", Field, 5}, + {"CurveParams.P", Field, 0}, + {"GenerateKey", Func, 0}, + {"Marshal", Func, 0}, + {"MarshalCompressed", Func, 15}, + {"P224", Func, 0}, + {"P256", Func, 0}, + {"P384", Func, 0}, + {"P521", Func, 0}, + {"Unmarshal", Func, 0}, + {"UnmarshalCompressed", Func, 15}, + }, + "crypto/hmac": { + {"Equal", Func, 1}, + {"New", Func, 0}, + }, + "crypto/md5": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/rand": { + {"Int", Func, 0}, + {"Prime", Func, 0}, + {"Read", Func, 0}, + {"Reader", Var, 0}, + }, + "crypto/rc4": { + {"(*Cipher).Reset", Method, 0}, + {"(*Cipher).XORKeyStream", Method, 0}, + {"(KeySizeError).Error", Method, 0}, + {"Cipher", Type, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/rsa": { + {"(*PSSOptions).HashFunc", Method, 4}, + {"(*PrivateKey).Decrypt", Method, 5}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Precompute", Method, 0}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PrivateKey).Size", Method, 11}, + {"(*PrivateKey).Validate", Method, 0}, + {"(*PublicKey).Equal", Method, 15}, + {"(*PublicKey).Size", Method, 11}, + {"CRTValue", Type, 0}, + {"CRTValue.Coeff", Field, 0}, + {"CRTValue.Exp", Field, 0}, + {"CRTValue.R", Field, 0}, + {"DecryptOAEP", Func, 0}, + {"DecryptPKCS1v15", Func, 0}, + {"DecryptPKCS1v15SessionKey", Func, 0}, + {"EncryptOAEP", Func, 0}, + {"EncryptPKCS1v15", Func, 0}, + {"ErrDecryption", Var, 0}, + {"ErrMessageTooLong", 
Var, 0}, + {"ErrVerification", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateMultiPrimeKey", Func, 0}, + {"OAEPOptions", Type, 5}, + {"OAEPOptions.Hash", Field, 5}, + {"OAEPOptions.Label", Field, 5}, + {"OAEPOptions.MGFHash", Field, 20}, + {"PKCS1v15DecryptOptions", Type, 5}, + {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5}, + {"PSSOptions", Type, 2}, + {"PSSOptions.Hash", Field, 4}, + {"PSSOptions.SaltLength", Field, 2}, + {"PSSSaltLengthAuto", Const, 2}, + {"PSSSaltLengthEqualsHash", Const, 2}, + {"PrecomputedValues", Type, 0}, + {"PrecomputedValues.CRTValues", Field, 0}, + {"PrecomputedValues.Dp", Field, 0}, + {"PrecomputedValues.Dq", Field, 0}, + {"PrecomputedValues.Qinv", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.Precomputed", Field, 0}, + {"PrivateKey.Primes", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.E", Field, 0}, + {"PublicKey.N", Field, 0}, + {"SignPKCS1v15", Func, 0}, + {"SignPSS", Func, 2}, + {"VerifyPKCS1v15", Func, 0}, + {"VerifyPSS", Func, 2}, + }, + "crypto/sha1": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/sha256": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New224", Func, 0}, + {"Size", Const, 0}, + {"Size224", Const, 0}, + {"Sum224", Func, 2}, + {"Sum256", Func, 2}, + }, + "crypto/sha512": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New384", Func, 0}, + {"New512_224", Func, 5}, + {"New512_256", Func, 5}, + {"Size", Const, 0}, + {"Size224", Const, 5}, + {"Size256", Const, 5}, + {"Size384", Const, 0}, + {"Sum384", Func, 2}, + {"Sum512", Func, 2}, + {"Sum512_224", Func, 5}, + {"Sum512_256", Func, 5}, + }, + "crypto/subtle": { + {"ConstantTimeByteEq", Func, 0}, + {"ConstantTimeCompare", Func, 0}, + {"ConstantTimeCopy", Func, 0}, + {"ConstantTimeEq", Func, 0}, + {"ConstantTimeLessOrEq", Func, 2}, + {"ConstantTimeSelect", Func, 0}, + {"XORBytes", Func, 20}, + }, + "crypto/tls": { + {"(*CertificateRequestInfo).Context", Method, 17}, + {"(*CertificateRequestInfo).SupportsCertificate", Method, 14}, + {"(*CertificateVerificationError).Error", Method, 20}, + {"(*CertificateVerificationError).Unwrap", Method, 20}, + {"(*ClientHelloInfo).Context", Method, 17}, + {"(*ClientHelloInfo).SupportsCertificate", Method, 14}, + {"(*ClientSessionState).ResumptionState", Method, 21}, + {"(*Config).BuildNameToCertificate", Method, 0}, + {"(*Config).Clone", Method, 8}, + {"(*Config).DecryptTicket", Method, 21}, + {"(*Config).EncryptTicket", Method, 21}, + {"(*Config).SetSessionTicketKeys", Method, 5}, + {"(*Conn).Close", Method, 0}, + {"(*Conn).CloseWrite", Method, 8}, + {"(*Conn).ConnectionState", Method, 0}, + {"(*Conn).Handshake", Method, 0}, + {"(*Conn).HandshakeContext", Method, 17}, + {"(*Conn).LocalAddr", Method, 0}, + {"(*Conn).NetConn", Method, 18}, + {"(*Conn).OCSPResponse", Method, 0}, + {"(*Conn).Read", Method, 0}, + {"(*Conn).RemoteAddr", Method, 0}, + {"(*Conn).SetDeadline", Method, 0}, + {"(*Conn).SetReadDeadline", Method, 0}, + {"(*Conn).SetWriteDeadline", Method, 0}, + {"(*Conn).VerifyHostname", Method, 0}, + {"(*Conn).Write", Method, 0}, + {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, + {"(*Dialer).Dial", Method, 15}, + {"(*Dialer).DialContext", Method, 15}, + {"(*QUICConn).Close", Method, 21}, + {"(*QUICConn).ConnectionState", Method, 21}, + {"(*QUICConn).HandleData", Method, 21}, + {"(*QUICConn).NextEvent", Method, 21}, + {"(*QUICConn).SendSessionTicket", Method, 21}, + 
{"(*QUICConn).SetTransportParameters", Method, 21}, + {"(*QUICConn).Start", Method, 21}, + {"(*SessionState).Bytes", Method, 21}, + {"(AlertError).Error", Method, 21}, + {"(ClientAuthType).String", Method, 15}, + {"(CurveID).String", Method, 15}, + {"(QUICEncryptionLevel).String", Method, 21}, + {"(RecordHeaderError).Error", Method, 6}, + {"(SignatureScheme).String", Method, 15}, + {"AlertError", Type, 21}, + {"Certificate", Type, 0}, + {"Certificate.Certificate", Field, 0}, + {"Certificate.Leaf", Field, 0}, + {"Certificate.OCSPStaple", Field, 0}, + {"Certificate.PrivateKey", Field, 0}, + {"Certificate.SignedCertificateTimestamps", Field, 5}, + {"Certificate.SupportedSignatureAlgorithms", Field, 14}, + {"CertificateRequestInfo", Type, 8}, + {"CertificateRequestInfo.AcceptableCAs", Field, 8}, + {"CertificateRequestInfo.SignatureSchemes", Field, 8}, + {"CertificateRequestInfo.Version", Field, 14}, + {"CertificateVerificationError", Type, 20}, + {"CertificateVerificationError.Err", Field, 20}, + {"CertificateVerificationError.UnverifiedCertificates", Field, 20}, + {"CipherSuite", Type, 14}, + {"CipherSuite.ID", Field, 14}, + {"CipherSuite.Insecure", Field, 14}, + {"CipherSuite.Name", Field, 14}, + {"CipherSuite.SupportedVersions", Field, 14}, + {"CipherSuiteName", Func, 14}, + {"CipherSuites", Func, 14}, + {"Client", Func, 0}, + {"ClientAuthType", Type, 0}, + {"ClientHelloInfo", Type, 4}, + {"ClientHelloInfo.CipherSuites", Field, 4}, + {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.ServerName", Field, 4}, + {"ClientHelloInfo.SignatureSchemes", Field, 8}, + {"ClientHelloInfo.SupportedCurves", Field, 4}, + {"ClientHelloInfo.SupportedPoints", Field, 4}, + {"ClientHelloInfo.SupportedProtos", Field, 8}, + {"ClientHelloInfo.SupportedVersions", Field, 8}, + {"ClientSessionCache", Type, 3}, + {"ClientSessionState", Type, 3}, + {"Config", Type, 0}, + {"Config.Certificates", Field, 0}, + {"Config.CipherSuites", Field, 0}, + {"Config.ClientAuth", Field, 0}, + {"Config.ClientCAs", Field, 0}, + {"Config.ClientSessionCache", Field, 3}, + {"Config.CurvePreferences", Field, 3}, + {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.GetCertificate", Field, 4}, + {"Config.GetClientCertificate", Field, 8}, + {"Config.GetConfigForClient", Field, 8}, + {"Config.InsecureSkipVerify", Field, 0}, + {"Config.KeyLogWriter", Field, 8}, + {"Config.MaxVersion", Field, 2}, + {"Config.MinVersion", Field, 2}, + {"Config.NameToCertificate", Field, 0}, + {"Config.NextProtos", Field, 0}, + {"Config.PreferServerCipherSuites", Field, 1}, + {"Config.Rand", Field, 0}, + {"Config.Renegotiation", Field, 7}, + {"Config.RootCAs", Field, 0}, + {"Config.ServerName", Field, 0}, + {"Config.SessionTicketKey", Field, 1}, + {"Config.SessionTicketsDisabled", Field, 1}, + {"Config.Time", Field, 0}, + {"Config.UnwrapSession", Field, 21}, + {"Config.VerifyConnection", Field, 15}, + {"Config.VerifyPeerCertificate", Field, 8}, + {"Config.WrapSession", Field, 21}, + {"Conn", Type, 0}, + {"ConnectionState", Type, 0}, + {"ConnectionState.CipherSuite", Field, 0}, + {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.HandshakeComplete", Field, 0}, + {"ConnectionState.NegotiatedProtocol", Field, 0}, + {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, + {"ConnectionState.OCSPResponse", Field, 5}, + {"ConnectionState.PeerCertificates", Field, 0}, + {"ConnectionState.ServerName", Field, 0}, + {"ConnectionState.SignedCertificateTimestamps", Field, 5}, + {"ConnectionState.TLSUnique", Field, 4}, + 
{"ConnectionState.VerifiedChains", Field, 0}, + {"ConnectionState.Version", Field, 3}, + {"CurveID", Type, 3}, + {"CurveP256", Const, 3}, + {"CurveP384", Const, 3}, + {"CurveP521", Const, 3}, + {"Dial", Func, 0}, + {"DialWithDialer", Func, 3}, + {"Dialer", Type, 15}, + {"Dialer.Config", Field, 15}, + {"Dialer.NetDialer", Field, 15}, + {"ECDSAWithP256AndSHA256", Const, 8}, + {"ECDSAWithP384AndSHA384", Const, 8}, + {"ECDSAWithP521AndSHA512", Const, 8}, + {"ECDSAWithSHA1", Const, 10}, + {"Ed25519", Const, 13}, + {"InsecureCipherSuites", Func, 14}, + {"Listen", Func, 0}, + {"LoadX509KeyPair", Func, 0}, + {"NewLRUClientSessionCache", Func, 3}, + {"NewListener", Func, 0}, + {"NewResumptionState", Func, 21}, + {"NoClientCert", Const, 0}, + {"PKCS1WithSHA1", Const, 8}, + {"PKCS1WithSHA256", Const, 8}, + {"PKCS1WithSHA384", Const, 8}, + {"PKCS1WithSHA512", Const, 8}, + {"PSSWithSHA256", Const, 8}, + {"PSSWithSHA384", Const, 8}, + {"PSSWithSHA512", Const, 8}, + {"ParseSessionState", Func, 21}, + {"QUICClient", Func, 21}, + {"QUICConfig", Type, 21}, + {"QUICConfig.TLSConfig", Field, 21}, + {"QUICConn", Type, 21}, + {"QUICEncryptionLevel", Type, 21}, + {"QUICEncryptionLevelApplication", Const, 21}, + {"QUICEncryptionLevelEarly", Const, 21}, + {"QUICEncryptionLevelHandshake", Const, 21}, + {"QUICEncryptionLevelInitial", Const, 21}, + {"QUICEvent", Type, 21}, + {"QUICEvent.Data", Field, 21}, + {"QUICEvent.Kind", Field, 21}, + {"QUICEvent.Level", Field, 21}, + {"QUICEvent.Suite", Field, 21}, + {"QUICEventKind", Type, 21}, + {"QUICHandshakeDone", Const, 21}, + {"QUICNoEvent", Const, 21}, + {"QUICRejectedEarlyData", Const, 21}, + {"QUICServer", Func, 21}, + {"QUICSessionTicketOptions", Type, 21}, + {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSetReadSecret", Const, 21}, + {"QUICSetWriteSecret", Const, 21}, + {"QUICTransportParameters", Const, 21}, + {"QUICTransportParametersRequired", Const, 21}, + {"QUICWriteData", Const, 21}, + {"RecordHeaderError", Type, 6}, + {"RecordHeaderError.Conn", Field, 12}, + {"RecordHeaderError.Msg", Field, 6}, + {"RecordHeaderError.RecordHeader", Field, 6}, + {"RenegotiateFreelyAsClient", Const, 7}, + {"RenegotiateNever", Const, 7}, + {"RenegotiateOnceAsClient", Const, 7}, + {"RenegotiationSupport", Type, 7}, + {"RequestClientCert", Const, 0}, + {"RequireAndVerifyClientCert", Const, 0}, + {"RequireAnyClientCert", Const, 0}, + {"Server", Func, 0}, + {"SessionState", Type, 21}, + {"SessionState.EarlyData", Field, 21}, + {"SessionState.Extra", Field, 21}, + {"SignatureScheme", Type, 8}, + {"TLS_AES_128_GCM_SHA256", Const, 12}, + {"TLS_AES_256_GCM_SHA384", Const, 12}, + {"TLS_CHACHA20_POLY1305_SHA256", Const, 12}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2}, + {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8}, + 
{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0}, + {"TLS_FALLBACK_SCSV", Const, 4}, + {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6}, + {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6}, + {"TLS_RSA_WITH_RC4_128_SHA", Const, 0}, + {"VerifyClientCertIfGiven", Const, 0}, + {"VersionName", Func, 21}, + {"VersionSSL30", Const, 2}, + {"VersionTLS10", Const, 2}, + {"VersionTLS11", Const, 2}, + {"VersionTLS12", Const, 2}, + {"VersionTLS13", Const, 12}, + {"X25519", Const, 8}, + {"X509KeyPair", Func, 0}, + }, + "crypto/x509": { + {"(*CertPool).AddCert", Method, 0}, + {"(*CertPool).AddCertWithConstraint", Method, 22}, + {"(*CertPool).AppendCertsFromPEM", Method, 0}, + {"(*CertPool).Clone", Method, 19}, + {"(*CertPool).Equal", Method, 19}, + {"(*CertPool).Subjects", Method, 0}, + {"(*Certificate).CheckCRLSignature", Method, 0}, + {"(*Certificate).CheckSignature", Method, 0}, + {"(*Certificate).CheckSignatureFrom", Method, 0}, + {"(*Certificate).CreateCRL", Method, 0}, + {"(*Certificate).Equal", Method, 0}, + {"(*Certificate).Verify", Method, 0}, + {"(*Certificate).VerifyHostname", Method, 0}, + {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*RevocationList).CheckSignatureFrom", Method, 19}, + {"(CertificateInvalidError).Error", Method, 0}, + {"(ConstraintViolationError).Error", Method, 0}, + {"(HostnameError).Error", Method, 0}, + {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).Equal", Method, 22}, + {"(OID).EqualASN1OID", Method, 22}, + {"(OID).String", Method, 22}, + {"(PublicKeyAlgorithm).String", Method, 10}, + {"(SignatureAlgorithm).String", Method, 6}, + {"(SystemRootsError).Error", Method, 1}, + {"(SystemRootsError).Unwrap", Method, 16}, + {"(UnhandledCriticalExtension).Error", Method, 0}, + {"(UnknownAuthorityError).Error", Method, 0}, + {"CANotAuthorizedForExtKeyUsage", Const, 10}, + {"CANotAuthorizedForThisName", Const, 0}, + {"CertPool", Type, 0}, + {"Certificate", Type, 0}, + {"Certificate.AuthorityKeyId", Field, 0}, + {"Certificate.BasicConstraintsValid", Field, 0}, + {"Certificate.CRLDistributionPoints", Field, 2}, + {"Certificate.DNSNames", Field, 0}, + {"Certificate.EmailAddresses", Field, 0}, + {"Certificate.ExcludedDNSDomains", Field, 9}, + {"Certificate.ExcludedEmailAddresses", Field, 10}, + {"Certificate.ExcludedIPRanges", Field, 10}, + {"Certificate.ExcludedURIDomains", Field, 10}, + {"Certificate.ExtKeyUsage", Field, 0}, + {"Certificate.Extensions", Field, 2}, + {"Certificate.ExtraExtensions", Field, 2}, + {"Certificate.IPAddresses", Field, 1}, + {"Certificate.IsCA", Field, 0}, + {"Certificate.Issuer", Field, 0}, + {"Certificate.IssuingCertificateURL", Field, 2}, + {"Certificate.KeyUsage", Field, 0}, + {"Certificate.MaxPathLen", Field, 0}, + {"Certificate.MaxPathLenZero", Field, 4}, + {"Certificate.NotAfter", Field, 0}, + {"Certificate.NotBefore", Field, 0}, + {"Certificate.OCSPServer", Field, 2}, + {"Certificate.PermittedDNSDomains", Field, 0}, + {"Certificate.PermittedDNSDomainsCritical", Field, 0}, + {"Certificate.PermittedEmailAddresses", Field, 10}, + {"Certificate.PermittedIPRanges", Field, 10}, + {"Certificate.PermittedURIDomains", Field, 10}, + {"Certificate.Policies", Field, 22}, + {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PublicKey", Field, 0}, + {"Certificate.PublicKeyAlgorithm", Field, 0}, 
+ {"Certificate.Raw", Field, 0}, + {"Certificate.RawIssuer", Field, 0}, + {"Certificate.RawSubject", Field, 0}, + {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, + {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.SerialNumber", Field, 0}, + {"Certificate.Signature", Field, 0}, + {"Certificate.SignatureAlgorithm", Field, 0}, + {"Certificate.Subject", Field, 0}, + {"Certificate.SubjectKeyId", Field, 0}, + {"Certificate.URIs", Field, 10}, + {"Certificate.UnhandledCriticalExtensions", Field, 5}, + {"Certificate.UnknownExtKeyUsage", Field, 0}, + {"Certificate.Version", Field, 0}, + {"CertificateInvalidError", Type, 0}, + {"CertificateInvalidError.Cert", Field, 0}, + {"CertificateInvalidError.Detail", Field, 10}, + {"CertificateInvalidError.Reason", Field, 0}, + {"CertificateRequest", Type, 3}, + {"CertificateRequest.Attributes", Field, 3}, + {"CertificateRequest.DNSNames", Field, 3}, + {"CertificateRequest.EmailAddresses", Field, 3}, + {"CertificateRequest.Extensions", Field, 3}, + {"CertificateRequest.ExtraExtensions", Field, 3}, + {"CertificateRequest.IPAddresses", Field, 3}, + {"CertificateRequest.PublicKey", Field, 3}, + {"CertificateRequest.PublicKeyAlgorithm", Field, 3}, + {"CertificateRequest.Raw", Field, 3}, + {"CertificateRequest.RawSubject", Field, 3}, + {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3}, + {"CertificateRequest.RawTBSCertificateRequest", Field, 3}, + {"CertificateRequest.Signature", Field, 3}, + {"CertificateRequest.SignatureAlgorithm", Field, 3}, + {"CertificateRequest.Subject", Field, 3}, + {"CertificateRequest.URIs", Field, 10}, + {"CertificateRequest.Version", Field, 3}, + {"ConstraintViolationError", Type, 0}, + {"CreateCertificate", Func, 0}, + {"CreateCertificateRequest", Func, 3}, + {"CreateRevocationList", Func, 15}, + {"DSA", Const, 0}, + {"DSAWithSHA1", Const, 0}, + {"DSAWithSHA256", Const, 0}, + {"DecryptPEMBlock", Func, 1}, + {"ECDSA", Const, 1}, + {"ECDSAWithSHA1", Const, 1}, + {"ECDSAWithSHA256", Const, 1}, + {"ECDSAWithSHA384", Const, 1}, + {"ECDSAWithSHA512", Const, 1}, + {"Ed25519", Const, 13}, + {"EncryptPEMBlock", Func, 1}, + {"ErrUnsupportedAlgorithm", Var, 0}, + {"Expired", Const, 0}, + {"ExtKeyUsage", Type, 0}, + {"ExtKeyUsageAny", Const, 0}, + {"ExtKeyUsageClientAuth", Const, 0}, + {"ExtKeyUsageCodeSigning", Const, 0}, + {"ExtKeyUsageEmailProtection", Const, 0}, + {"ExtKeyUsageIPSECEndSystem", Const, 1}, + {"ExtKeyUsageIPSECTunnel", Const, 1}, + {"ExtKeyUsageIPSECUser", Const, 1}, + {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1}, + {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1}, + {"ExtKeyUsageOCSPSigning", Const, 0}, + {"ExtKeyUsageServerAuth", Const, 0}, + {"ExtKeyUsageTimeStamping", Const, 0}, + {"HostnameError", Type, 0}, + {"HostnameError.Certificate", Field, 0}, + {"HostnameError.Host", Field, 0}, + {"IncompatibleUsage", Const, 1}, + {"IncorrectPasswordError", Var, 1}, + {"InsecureAlgorithmError", Type, 6}, + {"InvalidReason", Type, 0}, + {"IsEncryptedPEMBlock", Func, 1}, + {"KeyUsage", Type, 0}, + {"KeyUsageCRLSign", Const, 0}, + {"KeyUsageCertSign", Const, 0}, + {"KeyUsageContentCommitment", Const, 0}, + {"KeyUsageDataEncipherment", Const, 0}, + {"KeyUsageDecipherOnly", Const, 0}, + {"KeyUsageDigitalSignature", Const, 0}, + {"KeyUsageEncipherOnly", Const, 0}, + {"KeyUsageKeyAgreement", Const, 0}, + {"KeyUsageKeyEncipherment", Const, 0}, + {"MD2WithRSA", Const, 0}, + {"MD5WithRSA", Const, 0}, + 
{"MarshalECPrivateKey", Func, 2}, + {"MarshalPKCS1PrivateKey", Func, 0}, + {"MarshalPKCS1PublicKey", Func, 10}, + {"MarshalPKCS8PrivateKey", Func, 10}, + {"MarshalPKIXPublicKey", Func, 0}, + {"NameConstraintsWithoutSANs", Const, 10}, + {"NameMismatch", Const, 8}, + {"NewCertPool", Func, 0}, + {"NotAuthorizedToSign", Const, 0}, + {"OID", Type, 22}, + {"OIDFromInts", Func, 22}, + {"PEMCipher", Type, 1}, + {"PEMCipher3DES", Const, 1}, + {"PEMCipherAES128", Const, 1}, + {"PEMCipherAES192", Const, 1}, + {"PEMCipherAES256", Const, 1}, + {"PEMCipherDES", Const, 1}, + {"ParseCRL", Func, 0}, + {"ParseCertificate", Func, 0}, + {"ParseCertificateRequest", Func, 3}, + {"ParseCertificates", Func, 0}, + {"ParseDERCRL", Func, 0}, + {"ParseECPrivateKey", Func, 1}, + {"ParsePKCS1PrivateKey", Func, 0}, + {"ParsePKCS1PublicKey", Func, 10}, + {"ParsePKCS8PrivateKey", Func, 0}, + {"ParsePKIXPublicKey", Func, 0}, + {"ParseRevocationList", Func, 19}, + {"PublicKeyAlgorithm", Type, 0}, + {"PureEd25519", Const, 13}, + {"RSA", Const, 0}, + {"RevocationList", Type, 15}, + {"RevocationList.AuthorityKeyId", Field, 19}, + {"RevocationList.Extensions", Field, 19}, + {"RevocationList.ExtraExtensions", Field, 15}, + {"RevocationList.Issuer", Field, 19}, + {"RevocationList.NextUpdate", Field, 15}, + {"RevocationList.Number", Field, 15}, + {"RevocationList.Raw", Field, 19}, + {"RevocationList.RawIssuer", Field, 19}, + {"RevocationList.RawTBSRevocationList", Field, 19}, + {"RevocationList.RevokedCertificateEntries", Field, 21}, + {"RevocationList.RevokedCertificates", Field, 15}, + {"RevocationList.Signature", Field, 19}, + {"RevocationList.SignatureAlgorithm", Field, 15}, + {"RevocationList.ThisUpdate", Field, 15}, + {"RevocationListEntry", Type, 21}, + {"RevocationListEntry.Extensions", Field, 21}, + {"RevocationListEntry.ExtraExtensions", Field, 21}, + {"RevocationListEntry.Raw", Field, 21}, + {"RevocationListEntry.ReasonCode", Field, 21}, + {"RevocationListEntry.RevocationTime", Field, 21}, + {"RevocationListEntry.SerialNumber", Field, 21}, + {"SHA1WithRSA", Const, 0}, + {"SHA256WithRSA", Const, 0}, + {"SHA256WithRSAPSS", Const, 8}, + {"SHA384WithRSA", Const, 0}, + {"SHA384WithRSAPSS", Const, 8}, + {"SHA512WithRSA", Const, 0}, + {"SHA512WithRSAPSS", Const, 8}, + {"SetFallbackRoots", Func, 20}, + {"SignatureAlgorithm", Type, 0}, + {"SystemCertPool", Func, 7}, + {"SystemRootsError", Type, 1}, + {"SystemRootsError.Err", Field, 7}, + {"TooManyConstraints", Const, 10}, + {"TooManyIntermediates", Const, 0}, + {"UnconstrainedName", Const, 10}, + {"UnhandledCriticalExtension", Type, 0}, + {"UnknownAuthorityError", Type, 0}, + {"UnknownAuthorityError.Cert", Field, 8}, + {"UnknownPublicKeyAlgorithm", Const, 0}, + {"UnknownSignatureAlgorithm", Const, 0}, + {"VerifyOptions", Type, 0}, + {"VerifyOptions.CurrentTime", Field, 0}, + {"VerifyOptions.DNSName", Field, 0}, + {"VerifyOptions.Intermediates", Field, 0}, + {"VerifyOptions.KeyUsages", Field, 1}, + {"VerifyOptions.MaxConstraintComparisions", Field, 10}, + {"VerifyOptions.Roots", Field, 0}, + }, + "crypto/x509/pkix": { + {"(*CertificateList).HasExpired", Method, 0}, + {"(*Name).FillFromRDNSequence", Method, 0}, + {"(Name).String", Method, 10}, + {"(Name).ToRDNSequence", Method, 0}, + {"(RDNSequence).String", Method, 10}, + {"AlgorithmIdentifier", Type, 0}, + {"AlgorithmIdentifier.Algorithm", Field, 0}, + {"AlgorithmIdentifier.Parameters", Field, 0}, + {"AttributeTypeAndValue", Type, 0}, + {"AttributeTypeAndValue.Type", Field, 0}, + {"AttributeTypeAndValue.Value", Field, 0}, + 
{"AttributeTypeAndValueSET", Type, 3}, + {"AttributeTypeAndValueSET.Type", Field, 3}, + {"AttributeTypeAndValueSET.Value", Field, 3}, + {"CertificateList", Type, 0}, + {"CertificateList.SignatureAlgorithm", Field, 0}, + {"CertificateList.SignatureValue", Field, 0}, + {"CertificateList.TBSCertList", Field, 0}, + {"Extension", Type, 0}, + {"Extension.Critical", Field, 0}, + {"Extension.Id", Field, 0}, + {"Extension.Value", Field, 0}, + {"Name", Type, 0}, + {"Name.CommonName", Field, 0}, + {"Name.Country", Field, 0}, + {"Name.ExtraNames", Field, 5}, + {"Name.Locality", Field, 0}, + {"Name.Names", Field, 0}, + {"Name.Organization", Field, 0}, + {"Name.OrganizationalUnit", Field, 0}, + {"Name.PostalCode", Field, 0}, + {"Name.Province", Field, 0}, + {"Name.SerialNumber", Field, 0}, + {"Name.StreetAddress", Field, 0}, + {"RDNSequence", Type, 0}, + {"RelativeDistinguishedNameSET", Type, 0}, + {"RevokedCertificate", Type, 0}, + {"RevokedCertificate.Extensions", Field, 0}, + {"RevokedCertificate.RevocationTime", Field, 0}, + {"RevokedCertificate.SerialNumber", Field, 0}, + {"TBSCertificateList", Type, 0}, + {"TBSCertificateList.Extensions", Field, 0}, + {"TBSCertificateList.Issuer", Field, 0}, + {"TBSCertificateList.NextUpdate", Field, 0}, + {"TBSCertificateList.Raw", Field, 0}, + {"TBSCertificateList.RevokedCertificates", Field, 0}, + {"TBSCertificateList.Signature", Field, 0}, + {"TBSCertificateList.ThisUpdate", Field, 0}, + {"TBSCertificateList.Version", Field, 0}, + }, + "database/sql": { + {"(*ColumnType).DatabaseTypeName", Method, 8}, + {"(*ColumnType).DecimalSize", Method, 8}, + {"(*ColumnType).Length", Method, 8}, + {"(*ColumnType).Name", Method, 8}, + {"(*ColumnType).Nullable", Method, 8}, + {"(*ColumnType).ScanType", Method, 8}, + {"(*Conn).BeginTx", Method, 9}, + {"(*Conn).Close", Method, 9}, + {"(*Conn).ExecContext", Method, 9}, + {"(*Conn).PingContext", Method, 9}, + {"(*Conn).PrepareContext", Method, 9}, + {"(*Conn).QueryContext", Method, 9}, + {"(*Conn).QueryRowContext", Method, 9}, + {"(*Conn).Raw", Method, 13}, + {"(*DB).Begin", Method, 0}, + {"(*DB).BeginTx", Method, 8}, + {"(*DB).Close", Method, 0}, + {"(*DB).Conn", Method, 9}, + {"(*DB).Driver", Method, 0}, + {"(*DB).Exec", Method, 0}, + {"(*DB).ExecContext", Method, 8}, + {"(*DB).Ping", Method, 1}, + {"(*DB).PingContext", Method, 8}, + {"(*DB).Prepare", Method, 0}, + {"(*DB).PrepareContext", Method, 8}, + {"(*DB).Query", Method, 0}, + {"(*DB).QueryContext", Method, 8}, + {"(*DB).QueryRow", Method, 0}, + {"(*DB).QueryRowContext", Method, 8}, + {"(*DB).SetConnMaxIdleTime", Method, 15}, + {"(*DB).SetConnMaxLifetime", Method, 6}, + {"(*DB).SetMaxIdleConns", Method, 1}, + {"(*DB).SetMaxOpenConns", Method, 2}, + {"(*DB).Stats", Method, 5}, + {"(*Null).Scan", Method, 22}, + {"(*NullBool).Scan", Method, 0}, + {"(*NullByte).Scan", Method, 17}, + {"(*NullFloat64).Scan", Method, 0}, + {"(*NullInt16).Scan", Method, 17}, + {"(*NullInt32).Scan", Method, 13}, + {"(*NullInt64).Scan", Method, 0}, + {"(*NullString).Scan", Method, 0}, + {"(*NullTime).Scan", Method, 13}, + {"(*Row).Err", Method, 15}, + {"(*Row).Scan", Method, 0}, + {"(*Rows).Close", Method, 0}, + {"(*Rows).ColumnTypes", Method, 8}, + {"(*Rows).Columns", Method, 0}, + {"(*Rows).Err", Method, 0}, + {"(*Rows).Next", Method, 0}, + {"(*Rows).NextResultSet", Method, 8}, + {"(*Rows).Scan", Method, 0}, + {"(*Stmt).Close", Method, 0}, + {"(*Stmt).Exec", Method, 0}, + {"(*Stmt).ExecContext", Method, 8}, + {"(*Stmt).Query", Method, 0}, + {"(*Stmt).QueryContext", Method, 8}, + 
{"(*Stmt).QueryRow", Method, 0}, + {"(*Stmt).QueryRowContext", Method, 8}, + {"(*Tx).Commit", Method, 0}, + {"(*Tx).Exec", Method, 0}, + {"(*Tx).ExecContext", Method, 8}, + {"(*Tx).Prepare", Method, 0}, + {"(*Tx).PrepareContext", Method, 8}, + {"(*Tx).Query", Method, 0}, + {"(*Tx).QueryContext", Method, 8}, + {"(*Tx).QueryRow", Method, 0}, + {"(*Tx).QueryRowContext", Method, 8}, + {"(*Tx).Rollback", Method, 0}, + {"(*Tx).Stmt", Method, 0}, + {"(*Tx).StmtContext", Method, 8}, + {"(IsolationLevel).String", Method, 11}, + {"(Null).Value", Method, 22}, + {"(NullBool).Value", Method, 0}, + {"(NullByte).Value", Method, 17}, + {"(NullFloat64).Value", Method, 0}, + {"(NullInt16).Value", Method, 17}, + {"(NullInt32).Value", Method, 13}, + {"(NullInt64).Value", Method, 0}, + {"(NullString).Value", Method, 0}, + {"(NullTime).Value", Method, 13}, + {"ColumnType", Type, 8}, + {"Conn", Type, 9}, + {"DB", Type, 0}, + {"DBStats", Type, 5}, + {"DBStats.Idle", Field, 11}, + {"DBStats.InUse", Field, 11}, + {"DBStats.MaxIdleClosed", Field, 11}, + {"DBStats.MaxIdleTimeClosed", Field, 15}, + {"DBStats.MaxLifetimeClosed", Field, 11}, + {"DBStats.MaxOpenConnections", Field, 11}, + {"DBStats.OpenConnections", Field, 5}, + {"DBStats.WaitCount", Field, 11}, + {"DBStats.WaitDuration", Field, 11}, + {"Drivers", Func, 4}, + {"ErrConnDone", Var, 9}, + {"ErrNoRows", Var, 0}, + {"ErrTxDone", Var, 0}, + {"IsolationLevel", Type, 8}, + {"LevelDefault", Const, 8}, + {"LevelLinearizable", Const, 8}, + {"LevelReadCommitted", Const, 8}, + {"LevelReadUncommitted", Const, 8}, + {"LevelRepeatableRead", Const, 8}, + {"LevelSerializable", Const, 8}, + {"LevelSnapshot", Const, 8}, + {"LevelWriteCommitted", Const, 8}, + {"Named", Func, 8}, + {"NamedArg", Type, 8}, + {"NamedArg.Name", Field, 8}, + {"NamedArg.Value", Field, 8}, + {"Null", Type, 22}, + {"Null.V", Field, 22}, + {"Null.Valid", Field, 22}, + {"NullBool", Type, 0}, + {"NullBool.Bool", Field, 0}, + {"NullBool.Valid", Field, 0}, + {"NullByte", Type, 17}, + {"NullByte.Byte", Field, 17}, + {"NullByte.Valid", Field, 17}, + {"NullFloat64", Type, 0}, + {"NullFloat64.Float64", Field, 0}, + {"NullFloat64.Valid", Field, 0}, + {"NullInt16", Type, 17}, + {"NullInt16.Int16", Field, 17}, + {"NullInt16.Valid", Field, 17}, + {"NullInt32", Type, 13}, + {"NullInt32.Int32", Field, 13}, + {"NullInt32.Valid", Field, 13}, + {"NullInt64", Type, 0}, + {"NullInt64.Int64", Field, 0}, + {"NullInt64.Valid", Field, 0}, + {"NullString", Type, 0}, + {"NullString.String", Field, 0}, + {"NullString.Valid", Field, 0}, + {"NullTime", Type, 13}, + {"NullTime.Time", Field, 13}, + {"NullTime.Valid", Field, 13}, + {"Open", Func, 0}, + {"OpenDB", Func, 10}, + {"Out", Type, 9}, + {"Out.Dest", Field, 9}, + {"Out.In", Field, 9}, + {"RawBytes", Type, 0}, + {"Register", Func, 0}, + {"Result", Type, 0}, + {"Row", Type, 0}, + {"Rows", Type, 0}, + {"Scanner", Type, 0}, + {"Stmt", Type, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + }, + "database/sql/driver": { + {"(NotNull).ConvertValue", Method, 0}, + {"(Null).ConvertValue", Method, 0}, + {"(RowsAffected).LastInsertId", Method, 0}, + {"(RowsAffected).RowsAffected", Method, 0}, + {"Bool", Var, 0}, + {"ColumnConverter", Type, 0}, + {"Conn", Type, 0}, + {"ConnBeginTx", Type, 8}, + {"ConnPrepareContext", Type, 8}, + {"Connector", Type, 10}, + {"DefaultParameterConverter", Var, 0}, + {"Driver", Type, 0}, + {"DriverContext", Type, 10}, + {"ErrBadConn", Var, 0}, + {"ErrRemoveArgument", Var, 9}, + 
{"ErrSkip", Var, 0}, + {"Execer", Type, 0}, + {"ExecerContext", Type, 8}, + {"Int32", Var, 0}, + {"IsScanValue", Func, 0}, + {"IsValue", Func, 0}, + {"IsolationLevel", Type, 8}, + {"NamedValue", Type, 8}, + {"NamedValue.Name", Field, 8}, + {"NamedValue.Ordinal", Field, 8}, + {"NamedValue.Value", Field, 8}, + {"NamedValueChecker", Type, 9}, + {"NotNull", Type, 0}, + {"NotNull.Converter", Field, 0}, + {"Null", Type, 0}, + {"Null.Converter", Field, 0}, + {"Pinger", Type, 8}, + {"Queryer", Type, 1}, + {"QueryerContext", Type, 8}, + {"Result", Type, 0}, + {"ResultNoRows", Var, 0}, + {"Rows", Type, 0}, + {"RowsAffected", Type, 0}, + {"RowsColumnTypeDatabaseTypeName", Type, 8}, + {"RowsColumnTypeLength", Type, 8}, + {"RowsColumnTypeNullable", Type, 8}, + {"RowsColumnTypePrecisionScale", Type, 8}, + {"RowsColumnTypeScanType", Type, 8}, + {"RowsNextResultSet", Type, 8}, + {"SessionResetter", Type, 10}, + {"Stmt", Type, 0}, + {"StmtExecContext", Type, 8}, + {"StmtQueryContext", Type, 8}, + {"String", Var, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + {"Validator", Type, 15}, + {"Value", Type, 0}, + {"ValueConverter", Type, 0}, + {"Valuer", Type, 0}, + }, + "debug/buildinfo": { + {"BuildInfo", Type, 18}, + {"Read", Func, 18}, + {"ReadFile", Func, 18}, + }, + "debug/dwarf": { + {"(*AddrType).Basic", Method, 0}, + {"(*AddrType).Common", Method, 0}, + {"(*AddrType).Size", Method, 0}, + {"(*AddrType).String", Method, 0}, + {"(*ArrayType).Common", Method, 0}, + {"(*ArrayType).Size", Method, 0}, + {"(*ArrayType).String", Method, 0}, + {"(*BasicType).Basic", Method, 0}, + {"(*BasicType).Common", Method, 0}, + {"(*BasicType).Size", Method, 0}, + {"(*BasicType).String", Method, 0}, + {"(*BoolType).Basic", Method, 0}, + {"(*BoolType).Common", Method, 0}, + {"(*BoolType).Size", Method, 0}, + {"(*BoolType).String", Method, 0}, + {"(*CharType).Basic", Method, 0}, + {"(*CharType).Common", Method, 0}, + {"(*CharType).Size", Method, 0}, + {"(*CharType).String", Method, 0}, + {"(*CommonType).Common", Method, 0}, + {"(*CommonType).Size", Method, 0}, + {"(*ComplexType).Basic", Method, 0}, + {"(*ComplexType).Common", Method, 0}, + {"(*ComplexType).Size", Method, 0}, + {"(*ComplexType).String", Method, 0}, + {"(*Data).AddSection", Method, 14}, + {"(*Data).AddTypes", Method, 3}, + {"(*Data).LineReader", Method, 5}, + {"(*Data).Ranges", Method, 7}, + {"(*Data).Reader", Method, 0}, + {"(*Data).Type", Method, 0}, + {"(*DotDotDotType).Common", Method, 0}, + {"(*DotDotDotType).Size", Method, 0}, + {"(*DotDotDotType).String", Method, 0}, + {"(*Entry).AttrField", Method, 5}, + {"(*Entry).Val", Method, 0}, + {"(*EnumType).Common", Method, 0}, + {"(*EnumType).Size", Method, 0}, + {"(*EnumType).String", Method, 0}, + {"(*FloatType).Basic", Method, 0}, + {"(*FloatType).Common", Method, 0}, + {"(*FloatType).Size", Method, 0}, + {"(*FloatType).String", Method, 0}, + {"(*FuncType).Common", Method, 0}, + {"(*FuncType).Size", Method, 0}, + {"(*FuncType).String", Method, 0}, + {"(*IntType).Basic", Method, 0}, + {"(*IntType).Common", Method, 0}, + {"(*IntType).Size", Method, 0}, + {"(*IntType).String", Method, 0}, + {"(*LineReader).Files", Method, 14}, + {"(*LineReader).Next", Method, 5}, + {"(*LineReader).Reset", Method, 5}, + {"(*LineReader).Seek", Method, 5}, + {"(*LineReader).SeekPC", Method, 5}, + {"(*LineReader).Tell", Method, 5}, + {"(*PtrType).Common", Method, 0}, + {"(*PtrType).Size", Method, 0}, + {"(*PtrType).String", Method, 0}, + 
{"(*QualType).Common", Method, 0}, + {"(*QualType).Size", Method, 0}, + {"(*QualType).String", Method, 0}, + {"(*Reader).AddressSize", Method, 5}, + {"(*Reader).ByteOrder", Method, 14}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).SeekPC", Method, 7}, + {"(*Reader).SkipChildren", Method, 0}, + {"(*StructType).Common", Method, 0}, + {"(*StructType).Defn", Method, 0}, + {"(*StructType).Size", Method, 0}, + {"(*StructType).String", Method, 0}, + {"(*TypedefType).Common", Method, 0}, + {"(*TypedefType).Size", Method, 0}, + {"(*TypedefType).String", Method, 0}, + {"(*UcharType).Basic", Method, 0}, + {"(*UcharType).Common", Method, 0}, + {"(*UcharType).Size", Method, 0}, + {"(*UcharType).String", Method, 0}, + {"(*UintType).Basic", Method, 0}, + {"(*UintType).Common", Method, 0}, + {"(*UintType).Size", Method, 0}, + {"(*UintType).String", Method, 0}, + {"(*UnspecifiedType).Basic", Method, 4}, + {"(*UnspecifiedType).Common", Method, 4}, + {"(*UnspecifiedType).Size", Method, 4}, + {"(*UnspecifiedType).String", Method, 4}, + {"(*UnsupportedType).Common", Method, 13}, + {"(*UnsupportedType).Size", Method, 13}, + {"(*UnsupportedType).String", Method, 13}, + {"(*VoidType).Common", Method, 0}, + {"(*VoidType).Size", Method, 0}, + {"(*VoidType).String", Method, 0}, + {"(Attr).GoString", Method, 0}, + {"(Attr).String", Method, 0}, + {"(Class).GoString", Method, 5}, + {"(Class).String", Method, 5}, + {"(DecodeError).Error", Method, 0}, + {"(Tag).GoString", Method, 0}, + {"(Tag).String", Method, 0}, + {"AddrType", Type, 0}, + {"AddrType.BasicType", Field, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.CommonType", Field, 0}, + {"ArrayType.Count", Field, 0}, + {"ArrayType.StrideBitSize", Field, 0}, + {"ArrayType.Type", Field, 0}, + {"Attr", Type, 0}, + {"AttrAbstractOrigin", Const, 0}, + {"AttrAccessibility", Const, 0}, + {"AttrAddrBase", Const, 14}, + {"AttrAddrClass", Const, 0}, + {"AttrAlignment", Const, 14}, + {"AttrAllocated", Const, 0}, + {"AttrArtificial", Const, 0}, + {"AttrAssociated", Const, 0}, + {"AttrBaseTypes", Const, 0}, + {"AttrBinaryScale", Const, 14}, + {"AttrBitOffset", Const, 0}, + {"AttrBitSize", Const, 0}, + {"AttrByteSize", Const, 0}, + {"AttrCallAllCalls", Const, 14}, + {"AttrCallAllSourceCalls", Const, 14}, + {"AttrCallAllTailCalls", Const, 14}, + {"AttrCallColumn", Const, 0}, + {"AttrCallDataLocation", Const, 14}, + {"AttrCallDataValue", Const, 14}, + {"AttrCallFile", Const, 0}, + {"AttrCallLine", Const, 0}, + {"AttrCallOrigin", Const, 14}, + {"AttrCallPC", Const, 14}, + {"AttrCallParameter", Const, 14}, + {"AttrCallReturnPC", Const, 14}, + {"AttrCallTailCall", Const, 14}, + {"AttrCallTarget", Const, 14}, + {"AttrCallTargetClobbered", Const, 14}, + {"AttrCallValue", Const, 14}, + {"AttrCalling", Const, 0}, + {"AttrCommonRef", Const, 0}, + {"AttrCompDir", Const, 0}, + {"AttrConstExpr", Const, 14}, + {"AttrConstValue", Const, 0}, + {"AttrContainingType", Const, 0}, + {"AttrCount", Const, 0}, + {"AttrDataBitOffset", Const, 14}, + {"AttrDataLocation", Const, 0}, + {"AttrDataMemberLoc", Const, 0}, + {"AttrDecimalScale", Const, 14}, + {"AttrDecimalSign", Const, 14}, + {"AttrDeclColumn", Const, 0}, + {"AttrDeclFile", Const, 0}, + {"AttrDeclLine", Const, 0}, + {"AttrDeclaration", Const, 0}, + {"AttrDefaultValue", Const, 0}, + {"AttrDefaulted", Const, 14}, + {"AttrDeleted", Const, 14}, + {"AttrDescription", Const, 0}, + {"AttrDigitCount", Const, 14}, + {"AttrDiscr", Const, 0}, + {"AttrDiscrList", Const, 0}, + {"AttrDiscrValue", Const, 0}, + {"AttrDwoName", 
Const, 14}, + {"AttrElemental", Const, 14}, + {"AttrEncoding", Const, 0}, + {"AttrEndianity", Const, 14}, + {"AttrEntrypc", Const, 0}, + {"AttrEnumClass", Const, 14}, + {"AttrExplicit", Const, 14}, + {"AttrExportSymbols", Const, 14}, + {"AttrExtension", Const, 0}, + {"AttrExternal", Const, 0}, + {"AttrFrameBase", Const, 0}, + {"AttrFriend", Const, 0}, + {"AttrHighpc", Const, 0}, + {"AttrIdentifierCase", Const, 0}, + {"AttrImport", Const, 0}, + {"AttrInline", Const, 0}, + {"AttrIsOptional", Const, 0}, + {"AttrLanguage", Const, 0}, + {"AttrLinkageName", Const, 14}, + {"AttrLocation", Const, 0}, + {"AttrLoclistsBase", Const, 14}, + {"AttrLowerBound", Const, 0}, + {"AttrLowpc", Const, 0}, + {"AttrMacroInfo", Const, 0}, + {"AttrMacros", Const, 14}, + {"AttrMainSubprogram", Const, 14}, + {"AttrMutable", Const, 14}, + {"AttrName", Const, 0}, + {"AttrNamelistItem", Const, 0}, + {"AttrNoreturn", Const, 14}, + {"AttrObjectPointer", Const, 14}, + {"AttrOrdering", Const, 0}, + {"AttrPictureString", Const, 14}, + {"AttrPriority", Const, 0}, + {"AttrProducer", Const, 0}, + {"AttrPrototyped", Const, 0}, + {"AttrPure", Const, 14}, + {"AttrRanges", Const, 0}, + {"AttrRank", Const, 14}, + {"AttrRecursive", Const, 14}, + {"AttrReference", Const, 14}, + {"AttrReturnAddr", Const, 0}, + {"AttrRnglistsBase", Const, 14}, + {"AttrRvalueReference", Const, 14}, + {"AttrSegment", Const, 0}, + {"AttrSibling", Const, 0}, + {"AttrSignature", Const, 14}, + {"AttrSmall", Const, 14}, + {"AttrSpecification", Const, 0}, + {"AttrStartScope", Const, 0}, + {"AttrStaticLink", Const, 0}, + {"AttrStmtList", Const, 0}, + {"AttrStrOffsetsBase", Const, 14}, + {"AttrStride", Const, 0}, + {"AttrStrideSize", Const, 0}, + {"AttrStringLength", Const, 0}, + {"AttrStringLengthBitSize", Const, 14}, + {"AttrStringLengthByteSize", Const, 14}, + {"AttrThreadsScaled", Const, 14}, + {"AttrTrampoline", Const, 0}, + {"AttrType", Const, 0}, + {"AttrUpperBound", Const, 0}, + {"AttrUseLocation", Const, 0}, + {"AttrUseUTF8", Const, 0}, + {"AttrVarParam", Const, 0}, + {"AttrVirtuality", Const, 0}, + {"AttrVisibility", Const, 0}, + {"AttrVtableElemLoc", Const, 0}, + {"BasicType", Type, 0}, + {"BasicType.BitOffset", Field, 0}, + {"BasicType.BitSize", Field, 0}, + {"BasicType.CommonType", Field, 0}, + {"BasicType.DataBitOffset", Field, 18}, + {"BoolType", Type, 0}, + {"BoolType.BasicType", Field, 0}, + {"CharType", Type, 0}, + {"CharType.BasicType", Field, 0}, + {"Class", Type, 5}, + {"ClassAddrPtr", Const, 14}, + {"ClassAddress", Const, 5}, + {"ClassBlock", Const, 5}, + {"ClassConstant", Const, 5}, + {"ClassExprLoc", Const, 5}, + {"ClassFlag", Const, 5}, + {"ClassLinePtr", Const, 5}, + {"ClassLocList", Const, 14}, + {"ClassLocListPtr", Const, 5}, + {"ClassMacPtr", Const, 5}, + {"ClassRangeListPtr", Const, 5}, + {"ClassReference", Const, 5}, + {"ClassReferenceAlt", Const, 5}, + {"ClassReferenceSig", Const, 5}, + {"ClassRngList", Const, 14}, + {"ClassRngListsPtr", Const, 14}, + {"ClassStrOffsetsPtr", Const, 14}, + {"ClassString", Const, 5}, + {"ClassStringAlt", Const, 5}, + {"ClassUnknown", Const, 6}, + {"CommonType", Type, 0}, + {"CommonType.ByteSize", Field, 0}, + {"CommonType.Name", Field, 0}, + {"ComplexType", Type, 0}, + {"ComplexType.BasicType", Field, 0}, + {"Data", Type, 0}, + {"DecodeError", Type, 0}, + {"DecodeError.Err", Field, 0}, + {"DecodeError.Name", Field, 0}, + {"DecodeError.Offset", Field, 0}, + {"DotDotDotType", Type, 0}, + {"DotDotDotType.CommonType", Field, 0}, + {"Entry", Type, 0}, + {"Entry.Children", Field, 0}, + {"Entry.Field", 
Field, 0}, + {"Entry.Offset", Field, 0}, + {"Entry.Tag", Field, 0}, + {"EnumType", Type, 0}, + {"EnumType.CommonType", Field, 0}, + {"EnumType.EnumName", Field, 0}, + {"EnumType.Val", Field, 0}, + {"EnumValue", Type, 0}, + {"EnumValue.Name", Field, 0}, + {"EnumValue.Val", Field, 0}, + {"ErrUnknownPC", Var, 5}, + {"Field", Type, 0}, + {"Field.Attr", Field, 0}, + {"Field.Class", Field, 5}, + {"Field.Val", Field, 0}, + {"FloatType", Type, 0}, + {"FloatType.BasicType", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.CommonType", Field, 0}, + {"FuncType.ParamType", Field, 0}, + {"FuncType.ReturnType", Field, 0}, + {"IntType", Type, 0}, + {"IntType.BasicType", Field, 0}, + {"LineEntry", Type, 5}, + {"LineEntry.Address", Field, 5}, + {"LineEntry.BasicBlock", Field, 5}, + {"LineEntry.Column", Field, 5}, + {"LineEntry.Discriminator", Field, 5}, + {"LineEntry.EndSequence", Field, 5}, + {"LineEntry.EpilogueBegin", Field, 5}, + {"LineEntry.File", Field, 5}, + {"LineEntry.ISA", Field, 5}, + {"LineEntry.IsStmt", Field, 5}, + {"LineEntry.Line", Field, 5}, + {"LineEntry.OpIndex", Field, 5}, + {"LineEntry.PrologueEnd", Field, 5}, + {"LineFile", Type, 5}, + {"LineFile.Length", Field, 5}, + {"LineFile.Mtime", Field, 5}, + {"LineFile.Name", Field, 5}, + {"LineReader", Type, 5}, + {"LineReaderPos", Type, 5}, + {"New", Func, 0}, + {"Offset", Type, 0}, + {"PtrType", Type, 0}, + {"PtrType.CommonType", Field, 0}, + {"PtrType.Type", Field, 0}, + {"QualType", Type, 0}, + {"QualType.CommonType", Field, 0}, + {"QualType.Qual", Field, 0}, + {"QualType.Type", Field, 0}, + {"Reader", Type, 0}, + {"StructField", Type, 0}, + {"StructField.BitOffset", Field, 0}, + {"StructField.BitSize", Field, 0}, + {"StructField.ByteOffset", Field, 0}, + {"StructField.ByteSize", Field, 0}, + {"StructField.DataBitOffset", Field, 18}, + {"StructField.Name", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructType", Type, 0}, + {"StructType.CommonType", Field, 0}, + {"StructType.Field", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Kind", Field, 0}, + {"StructType.StructName", Field, 0}, + {"Tag", Type, 0}, + {"TagAccessDeclaration", Const, 0}, + {"TagArrayType", Const, 0}, + {"TagAtomicType", Const, 14}, + {"TagBaseType", Const, 0}, + {"TagCallSite", Const, 14}, + {"TagCallSiteParameter", Const, 14}, + {"TagCatchDwarfBlock", Const, 0}, + {"TagClassType", Const, 0}, + {"TagCoarrayType", Const, 14}, + {"TagCommonDwarfBlock", Const, 0}, + {"TagCommonInclusion", Const, 0}, + {"TagCompileUnit", Const, 0}, + {"TagCondition", Const, 3}, + {"TagConstType", Const, 0}, + {"TagConstant", Const, 0}, + {"TagDwarfProcedure", Const, 0}, + {"TagDynamicType", Const, 14}, + {"TagEntryPoint", Const, 0}, + {"TagEnumerationType", Const, 0}, + {"TagEnumerator", Const, 0}, + {"TagFileType", Const, 0}, + {"TagFormalParameter", Const, 0}, + {"TagFriend", Const, 0}, + {"TagGenericSubrange", Const, 14}, + {"TagImmutableType", Const, 14}, + {"TagImportedDeclaration", Const, 0}, + {"TagImportedModule", Const, 0}, + {"TagImportedUnit", Const, 0}, + {"TagInheritance", Const, 0}, + {"TagInlinedSubroutine", Const, 0}, + {"TagInterfaceType", Const, 0}, + {"TagLabel", Const, 0}, + {"TagLexDwarfBlock", Const, 0}, + {"TagMember", Const, 0}, + {"TagModule", Const, 0}, + {"TagMutableType", Const, 0}, + {"TagNamelist", Const, 0}, + {"TagNamelistItem", Const, 0}, + {"TagNamespace", Const, 0}, + {"TagPackedType", Const, 0}, + {"TagPartialUnit", Const, 0}, + {"TagPointerType", Const, 0}, + {"TagPtrToMemberType", Const, 0}, + {"TagReferenceType", Const, 0}, + 
{"TagRestrictType", Const, 0}, + {"TagRvalueReferenceType", Const, 3}, + {"TagSetType", Const, 0}, + {"TagSharedType", Const, 3}, + {"TagSkeletonUnit", Const, 14}, + {"TagStringType", Const, 0}, + {"TagStructType", Const, 0}, + {"TagSubprogram", Const, 0}, + {"TagSubrangeType", Const, 0}, + {"TagSubroutineType", Const, 0}, + {"TagTemplateAlias", Const, 3}, + {"TagTemplateTypeParameter", Const, 0}, + {"TagTemplateValueParameter", Const, 0}, + {"TagThrownType", Const, 0}, + {"TagTryDwarfBlock", Const, 0}, + {"TagTypeUnit", Const, 3}, + {"TagTypedef", Const, 0}, + {"TagUnionType", Const, 0}, + {"TagUnspecifiedParameters", Const, 0}, + {"TagUnspecifiedType", Const, 0}, + {"TagVariable", Const, 0}, + {"TagVariant", Const, 0}, + {"TagVariantPart", Const, 0}, + {"TagVolatileType", Const, 0}, + {"TagWithStmt", Const, 0}, + {"Type", Type, 0}, + {"TypedefType", Type, 0}, + {"TypedefType.CommonType", Field, 0}, + {"TypedefType.Type", Field, 0}, + {"UcharType", Type, 0}, + {"UcharType.BasicType", Field, 0}, + {"UintType", Type, 0}, + {"UintType.BasicType", Field, 0}, + {"UnspecifiedType", Type, 4}, + {"UnspecifiedType.BasicType", Field, 4}, + {"UnsupportedType", Type, 13}, + {"UnsupportedType.CommonType", Field, 13}, + {"UnsupportedType.Tag", Field, 13}, + {"VoidType", Type, 0}, + {"VoidType.CommonType", Field, 0}, + }, + "debug/elf": { + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).DynString", Method, 1}, + {"(*File).DynValue", Method, 21}, + {"(*File).DynamicSymbols", Method, 4}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).SectionByType", Method, 0}, + {"(*File).Symbols", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Prog).Open", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Class).GoString", Method, 0}, + {"(Class).String", Method, 0}, + {"(CompressionType).GoString", Method, 6}, + {"(CompressionType).String", Method, 6}, + {"(Data).GoString", Method, 0}, + {"(Data).String", Method, 0}, + {"(DynFlag).GoString", Method, 0}, + {"(DynFlag).String", Method, 0}, + {"(DynFlag1).GoString", Method, 21}, + {"(DynFlag1).String", Method, 21}, + {"(DynTag).GoString", Method, 0}, + {"(DynTag).String", Method, 0}, + {"(Machine).GoString", Method, 0}, + {"(Machine).String", Method, 0}, + {"(NType).GoString", Method, 0}, + {"(NType).String", Method, 0}, + {"(OSABI).GoString", Method, 0}, + {"(OSABI).String", Method, 0}, + {"(Prog).ReadAt", Method, 0}, + {"(ProgFlag).GoString", Method, 0}, + {"(ProgFlag).String", Method, 0}, + {"(ProgType).GoString", Method, 0}, + {"(ProgType).String", Method, 0}, + {"(R_386).GoString", Method, 0}, + {"(R_386).String", Method, 0}, + {"(R_390).GoString", Method, 7}, + {"(R_390).String", Method, 7}, + {"(R_AARCH64).GoString", Method, 4}, + {"(R_AARCH64).String", Method, 4}, + {"(R_ALPHA).GoString", Method, 0}, + {"(R_ALPHA).String", Method, 0}, + {"(R_ARM).GoString", Method, 0}, + {"(R_ARM).String", Method, 0}, + {"(R_LARCH).GoString", Method, 19}, + {"(R_LARCH).String", Method, 19}, + {"(R_MIPS).GoString", Method, 6}, + {"(R_MIPS).String", Method, 6}, + {"(R_PPC).GoString", Method, 0}, + {"(R_PPC).String", Method, 0}, + {"(R_PPC64).GoString", Method, 5}, + {"(R_PPC64).String", Method, 5}, + {"(R_RISCV).GoString", Method, 11}, + {"(R_RISCV).String", Method, 11}, + {"(R_SPARC).GoString", Method, 0}, + {"(R_SPARC).String", Method, 0}, + {"(R_X86_64).GoString", Method, 0}, + {"(R_X86_64).String", Method, 0}, + 
{"(Section).ReadAt", Method, 0}, + {"(SectionFlag).GoString", Method, 0}, + {"(SectionFlag).String", Method, 0}, + {"(SectionIndex).GoString", Method, 0}, + {"(SectionIndex).String", Method, 0}, + {"(SectionType).GoString", Method, 0}, + {"(SectionType).String", Method, 0}, + {"(SymBind).GoString", Method, 0}, + {"(SymBind).String", Method, 0}, + {"(SymType).GoString", Method, 0}, + {"(SymType).String", Method, 0}, + {"(SymVis).GoString", Method, 0}, + {"(SymVis).String", Method, 0}, + {"(Type).GoString", Method, 0}, + {"(Type).String", Method, 0}, + {"(Version).GoString", Method, 0}, + {"(Version).String", Method, 0}, + {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, + {"COMPRESS_HIOS", Const, 6}, + {"COMPRESS_HIPROC", Const, 6}, + {"COMPRESS_LOOS", Const, 6}, + {"COMPRESS_LOPROC", Const, 6}, + {"COMPRESS_ZLIB", Const, 6}, + {"COMPRESS_ZSTD", Const, 21}, + {"Chdr32", Type, 6}, + {"Chdr32.Addralign", Field, 6}, + {"Chdr32.Size", Field, 6}, + {"Chdr32.Type", Field, 6}, + {"Chdr64", Type, 6}, + {"Chdr64.Addralign", Field, 6}, + {"Chdr64.Size", Field, 6}, + {"Chdr64.Type", Field, 6}, + {"Class", Type, 0}, + {"CompressionType", Type, 6}, + {"DF_1_CONFALT", Const, 21}, + {"DF_1_DIRECT", Const, 21}, + {"DF_1_DISPRELDNE", Const, 21}, + {"DF_1_DISPRELPND", Const, 21}, + {"DF_1_EDITED", Const, 21}, + {"DF_1_ENDFILTEE", Const, 21}, + {"DF_1_GLOBAL", Const, 21}, + {"DF_1_GLOBAUDIT", Const, 21}, + {"DF_1_GROUP", Const, 21}, + {"DF_1_IGNMULDEF", Const, 21}, + {"DF_1_INITFIRST", Const, 21}, + {"DF_1_INTERPOSE", Const, 21}, + {"DF_1_KMOD", Const, 21}, + {"DF_1_LOADFLTR", Const, 21}, + {"DF_1_NOCOMMON", Const, 21}, + {"DF_1_NODEFLIB", Const, 21}, + {"DF_1_NODELETE", Const, 21}, + {"DF_1_NODIRECT", Const, 21}, + {"DF_1_NODUMP", Const, 21}, + {"DF_1_NOHDR", Const, 21}, + {"DF_1_NOKSYMS", Const, 21}, + {"DF_1_NOOPEN", Const, 21}, + {"DF_1_NORELOC", Const, 21}, + {"DF_1_NOW", Const, 21}, + {"DF_1_ORIGIN", Const, 21}, + {"DF_1_PIE", Const, 21}, + {"DF_1_SINGLETON", Const, 21}, + {"DF_1_STUB", Const, 21}, + {"DF_1_SYMINTPOSE", Const, 21}, + {"DF_1_TRANS", Const, 21}, + {"DF_1_WEAKFILTER", Const, 21}, + {"DF_BIND_NOW", Const, 0}, + {"DF_ORIGIN", Const, 0}, + {"DF_STATIC_TLS", Const, 0}, + {"DF_SYMBOLIC", Const, 0}, + {"DF_TEXTREL", Const, 0}, + {"DT_ADDRRNGHI", Const, 16}, + {"DT_ADDRRNGLO", Const, 16}, + {"DT_AUDIT", Const, 16}, + {"DT_AUXILIARY", Const, 16}, + {"DT_BIND_NOW", Const, 0}, + {"DT_CHECKSUM", Const, 16}, + {"DT_CONFIG", Const, 16}, + {"DT_DEBUG", Const, 0}, + {"DT_DEPAUDIT", Const, 16}, + {"DT_ENCODING", Const, 0}, + {"DT_FEATURE", Const, 16}, + {"DT_FILTER", Const, 16}, + {"DT_FINI", Const, 0}, + {"DT_FINI_ARRAY", Const, 0}, + {"DT_FINI_ARRAYSZ", Const, 0}, + {"DT_FLAGS", Const, 0}, + {"DT_FLAGS_1", Const, 16}, + {"DT_GNU_CONFLICT", Const, 16}, + {"DT_GNU_CONFLICTSZ", Const, 16}, + {"DT_GNU_HASH", Const, 16}, + {"DT_GNU_LIBLIST", Const, 16}, + {"DT_GNU_LIBLISTSZ", Const, 16}, + {"DT_GNU_PRELINKED", Const, 16}, + {"DT_HASH", Const, 0}, + {"DT_HIOS", Const, 0}, + {"DT_HIPROC", Const, 0}, + {"DT_INIT", Const, 0}, + {"DT_INIT_ARRAY", Const, 0}, + {"DT_INIT_ARRAYSZ", Const, 0}, + {"DT_JMPREL", Const, 0}, + {"DT_LOOS", Const, 0}, + {"DT_LOPROC", Const, 0}, + {"DT_MIPS_AUX_DYNAMIC", Const, 16}, + {"DT_MIPS_BASE_ADDRESS", Const, 16}, + {"DT_MIPS_COMPACT_SIZE", Const, 16}, + {"DT_MIPS_CONFLICT", Const, 16}, + {"DT_MIPS_CONFLICTNO", Const, 16}, + {"DT_MIPS_CXX_FLAGS", Const, 16}, + {"DT_MIPS_DELTA_CLASS", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16}, + 
{"DT_MIPS_DELTA_CLASS_NO", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16}, + {"DT_MIPS_DELTA_RELOC", Const, 16}, + {"DT_MIPS_DELTA_RELOC_NO", Const, 16}, + {"DT_MIPS_DELTA_SYM", Const, 16}, + {"DT_MIPS_DELTA_SYM_NO", Const, 16}, + {"DT_MIPS_DYNSTR_ALIGN", Const, 16}, + {"DT_MIPS_FLAGS", Const, 16}, + {"DT_MIPS_GOTSYM", Const, 16}, + {"DT_MIPS_GP_VALUE", Const, 16}, + {"DT_MIPS_HIDDEN_GOTIDX", Const, 16}, + {"DT_MIPS_HIPAGENO", Const, 16}, + {"DT_MIPS_ICHECKSUM", Const, 16}, + {"DT_MIPS_INTERFACE", Const, 16}, + {"DT_MIPS_INTERFACE_SIZE", Const, 16}, + {"DT_MIPS_IVERSION", Const, 16}, + {"DT_MIPS_LIBLIST", Const, 16}, + {"DT_MIPS_LIBLISTNO", Const, 16}, + {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTNO", Const, 16}, + {"DT_MIPS_MSYM", Const, 16}, + {"DT_MIPS_OPTIONS", Const, 16}, + {"DT_MIPS_PERF_SUFFIX", Const, 16}, + {"DT_MIPS_PIXIE_INIT", Const, 16}, + {"DT_MIPS_PLTGOT", Const, 16}, + {"DT_MIPS_PROTECTED_GOTIDX", Const, 16}, + {"DT_MIPS_RLD_MAP", Const, 16}, + {"DT_MIPS_RLD_MAP_REL", Const, 16}, + {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16}, + {"DT_MIPS_RLD_VERSION", Const, 16}, + {"DT_MIPS_RWPLT", Const, 16}, + {"DT_MIPS_SYMBOL_LIB", Const, 16}, + {"DT_MIPS_SYMTABNO", Const, 16}, + {"DT_MIPS_TIME_STAMP", Const, 16}, + {"DT_MIPS_UNREFEXTNO", Const, 16}, + {"DT_MOVEENT", Const, 16}, + {"DT_MOVESZ", Const, 16}, + {"DT_MOVETAB", Const, 16}, + {"DT_NEEDED", Const, 0}, + {"DT_NULL", Const, 0}, + {"DT_PLTGOT", Const, 0}, + {"DT_PLTPAD", Const, 16}, + {"DT_PLTPADSZ", Const, 16}, + {"DT_PLTREL", Const, 0}, + {"DT_PLTRELSZ", Const, 0}, + {"DT_POSFLAG_1", Const, 16}, + {"DT_PPC64_GLINK", Const, 16}, + {"DT_PPC64_OPD", Const, 16}, + {"DT_PPC64_OPDSZ", Const, 16}, + {"DT_PPC64_OPT", Const, 16}, + {"DT_PPC_GOT", Const, 16}, + {"DT_PPC_OPT", Const, 16}, + {"DT_PREINIT_ARRAY", Const, 0}, + {"DT_PREINIT_ARRAYSZ", Const, 0}, + {"DT_REL", Const, 0}, + {"DT_RELA", Const, 0}, + {"DT_RELACOUNT", Const, 16}, + {"DT_RELAENT", Const, 0}, + {"DT_RELASZ", Const, 0}, + {"DT_RELCOUNT", Const, 16}, + {"DT_RELENT", Const, 0}, + {"DT_RELSZ", Const, 0}, + {"DT_RPATH", Const, 0}, + {"DT_RUNPATH", Const, 0}, + {"DT_SONAME", Const, 0}, + {"DT_SPARC_REGISTER", Const, 16}, + {"DT_STRSZ", Const, 0}, + {"DT_STRTAB", Const, 0}, + {"DT_SYMBOLIC", Const, 0}, + {"DT_SYMENT", Const, 0}, + {"DT_SYMINENT", Const, 16}, + {"DT_SYMINFO", Const, 16}, + {"DT_SYMINSZ", Const, 16}, + {"DT_SYMTAB", Const, 0}, + {"DT_SYMTAB_SHNDX", Const, 16}, + {"DT_TEXTREL", Const, 0}, + {"DT_TLSDESC_GOT", Const, 16}, + {"DT_TLSDESC_PLT", Const, 16}, + {"DT_USED", Const, 16}, + {"DT_VALRNGHI", Const, 16}, + {"DT_VALRNGLO", Const, 16}, + {"DT_VERDEF", Const, 16}, + {"DT_VERDEFNUM", Const, 16}, + {"DT_VERNEED", Const, 0}, + {"DT_VERNEEDNUM", Const, 0}, + {"DT_VERSYM", Const, 0}, + {"Data", Type, 0}, + {"Dyn32", Type, 0}, + {"Dyn32.Tag", Field, 0}, + {"Dyn32.Val", Field, 0}, + {"Dyn64", Type, 0}, + {"Dyn64.Tag", Field, 0}, + {"Dyn64.Val", Field, 0}, + {"DynFlag", Type, 0}, + {"DynFlag1", Type, 21}, + {"DynTag", Type, 0}, + {"EI_ABIVERSION", Const, 0}, + {"EI_CLASS", Const, 0}, + {"EI_DATA", Const, 0}, + {"EI_NIDENT", Const, 0}, + {"EI_OSABI", Const, 0}, + {"EI_PAD", Const, 0}, + {"EI_VERSION", Const, 0}, + {"ELFCLASS32", Const, 0}, + {"ELFCLASS64", Const, 0}, + {"ELFCLASSNONE", Const, 0}, + {"ELFDATA2LSB", Const, 0}, + {"ELFDATA2MSB", Const, 0}, + {"ELFDATANONE", Const, 0}, + {"ELFMAG", Const, 0}, + {"ELFOSABI_86OPEN", Const, 0}, + {"ELFOSABI_AIX", Const, 
0}, + {"ELFOSABI_ARM", Const, 0}, + {"ELFOSABI_AROS", Const, 11}, + {"ELFOSABI_CLOUDABI", Const, 11}, + {"ELFOSABI_FENIXOS", Const, 11}, + {"ELFOSABI_FREEBSD", Const, 0}, + {"ELFOSABI_HPUX", Const, 0}, + {"ELFOSABI_HURD", Const, 0}, + {"ELFOSABI_IRIX", Const, 0}, + {"ELFOSABI_LINUX", Const, 0}, + {"ELFOSABI_MODESTO", Const, 0}, + {"ELFOSABI_NETBSD", Const, 0}, + {"ELFOSABI_NONE", Const, 0}, + {"ELFOSABI_NSK", Const, 0}, + {"ELFOSABI_OPENBSD", Const, 0}, + {"ELFOSABI_OPENVMS", Const, 0}, + {"ELFOSABI_SOLARIS", Const, 0}, + {"ELFOSABI_STANDALONE", Const, 0}, + {"ELFOSABI_TRU64", Const, 0}, + {"EM_386", Const, 0}, + {"EM_486", Const, 0}, + {"EM_56800EX", Const, 11}, + {"EM_68HC05", Const, 11}, + {"EM_68HC08", Const, 11}, + {"EM_68HC11", Const, 11}, + {"EM_68HC12", Const, 0}, + {"EM_68HC16", Const, 11}, + {"EM_68K", Const, 0}, + {"EM_78KOR", Const, 11}, + {"EM_8051", Const, 11}, + {"EM_860", Const, 0}, + {"EM_88K", Const, 0}, + {"EM_960", Const, 0}, + {"EM_AARCH64", Const, 4}, + {"EM_ALPHA", Const, 0}, + {"EM_ALPHA_STD", Const, 0}, + {"EM_ALTERA_NIOS2", Const, 11}, + {"EM_AMDGPU", Const, 11}, + {"EM_ARC", Const, 0}, + {"EM_ARCA", Const, 11}, + {"EM_ARC_COMPACT", Const, 11}, + {"EM_ARC_COMPACT2", Const, 11}, + {"EM_ARM", Const, 0}, + {"EM_AVR", Const, 11}, + {"EM_AVR32", Const, 11}, + {"EM_BA1", Const, 11}, + {"EM_BA2", Const, 11}, + {"EM_BLACKFIN", Const, 11}, + {"EM_BPF", Const, 11}, + {"EM_C166", Const, 11}, + {"EM_CDP", Const, 11}, + {"EM_CE", Const, 11}, + {"EM_CLOUDSHIELD", Const, 11}, + {"EM_COGE", Const, 11}, + {"EM_COLDFIRE", Const, 0}, + {"EM_COOL", Const, 11}, + {"EM_COREA_1ST", Const, 11}, + {"EM_COREA_2ND", Const, 11}, + {"EM_CR", Const, 11}, + {"EM_CR16", Const, 11}, + {"EM_CRAYNV2", Const, 11}, + {"EM_CRIS", Const, 11}, + {"EM_CRX", Const, 11}, + {"EM_CSR_KALIMBA", Const, 11}, + {"EM_CUDA", Const, 11}, + {"EM_CYPRESS_M8C", Const, 11}, + {"EM_D10V", Const, 11}, + {"EM_D30V", Const, 11}, + {"EM_DSP24", Const, 11}, + {"EM_DSPIC30F", Const, 11}, + {"EM_DXP", Const, 11}, + {"EM_ECOG1", Const, 11}, + {"EM_ECOG16", Const, 11}, + {"EM_ECOG1X", Const, 11}, + {"EM_ECOG2", Const, 11}, + {"EM_ETPU", Const, 11}, + {"EM_EXCESS", Const, 11}, + {"EM_F2MC16", Const, 11}, + {"EM_FIREPATH", Const, 11}, + {"EM_FR20", Const, 0}, + {"EM_FR30", Const, 11}, + {"EM_FT32", Const, 11}, + {"EM_FX66", Const, 11}, + {"EM_H8S", Const, 0}, + {"EM_H8_300", Const, 0}, + {"EM_H8_300H", Const, 0}, + {"EM_H8_500", Const, 0}, + {"EM_HUANY", Const, 11}, + {"EM_IA_64", Const, 0}, + {"EM_INTEL205", Const, 11}, + {"EM_INTEL206", Const, 11}, + {"EM_INTEL207", Const, 11}, + {"EM_INTEL208", Const, 11}, + {"EM_INTEL209", Const, 11}, + {"EM_IP2K", Const, 11}, + {"EM_JAVELIN", Const, 11}, + {"EM_K10M", Const, 11}, + {"EM_KM32", Const, 11}, + {"EM_KMX16", Const, 11}, + {"EM_KMX32", Const, 11}, + {"EM_KMX8", Const, 11}, + {"EM_KVARC", Const, 11}, + {"EM_L10M", Const, 11}, + {"EM_LANAI", Const, 11}, + {"EM_LATTICEMICO32", Const, 11}, + {"EM_LOONGARCH", Const, 19}, + {"EM_M16C", Const, 11}, + {"EM_M32", Const, 0}, + {"EM_M32C", Const, 11}, + {"EM_M32R", Const, 11}, + {"EM_MANIK", Const, 11}, + {"EM_MAX", Const, 11}, + {"EM_MAXQ30", Const, 11}, + {"EM_MCHP_PIC", Const, 11}, + {"EM_MCST_ELBRUS", Const, 11}, + {"EM_ME16", Const, 0}, + {"EM_METAG", Const, 11}, + {"EM_MICROBLAZE", Const, 11}, + {"EM_MIPS", Const, 0}, + {"EM_MIPS_RS3_LE", Const, 0}, + {"EM_MIPS_RS4_BE", Const, 0}, + {"EM_MIPS_X", Const, 0}, + {"EM_MMA", Const, 0}, + {"EM_MMDSP_PLUS", Const, 11}, + {"EM_MMIX", Const, 11}, + {"EM_MN10200", Const, 11}, + {"EM_MN10300", 
Const, 11}, + {"EM_MOXIE", Const, 11}, + {"EM_MSP430", Const, 11}, + {"EM_NCPU", Const, 0}, + {"EM_NDR1", Const, 0}, + {"EM_NDS32", Const, 11}, + {"EM_NONE", Const, 0}, + {"EM_NORC", Const, 11}, + {"EM_NS32K", Const, 11}, + {"EM_OPEN8", Const, 11}, + {"EM_OPENRISC", Const, 11}, + {"EM_PARISC", Const, 0}, + {"EM_PCP", Const, 0}, + {"EM_PDP10", Const, 11}, + {"EM_PDP11", Const, 11}, + {"EM_PDSP", Const, 11}, + {"EM_PJ", Const, 11}, + {"EM_PPC", Const, 0}, + {"EM_PPC64", Const, 0}, + {"EM_PRISM", Const, 11}, + {"EM_QDSP6", Const, 11}, + {"EM_R32C", Const, 11}, + {"EM_RCE", Const, 0}, + {"EM_RH32", Const, 0}, + {"EM_RISCV", Const, 11}, + {"EM_RL78", Const, 11}, + {"EM_RS08", Const, 11}, + {"EM_RX", Const, 11}, + {"EM_S370", Const, 0}, + {"EM_S390", Const, 0}, + {"EM_SCORE7", Const, 11}, + {"EM_SEP", Const, 11}, + {"EM_SE_C17", Const, 11}, + {"EM_SE_C33", Const, 11}, + {"EM_SH", Const, 0}, + {"EM_SHARC", Const, 11}, + {"EM_SLE9X", Const, 11}, + {"EM_SNP1K", Const, 11}, + {"EM_SPARC", Const, 0}, + {"EM_SPARC32PLUS", Const, 0}, + {"EM_SPARCV9", Const, 0}, + {"EM_ST100", Const, 0}, + {"EM_ST19", Const, 11}, + {"EM_ST200", Const, 11}, + {"EM_ST7", Const, 11}, + {"EM_ST9PLUS", Const, 11}, + {"EM_STARCORE", Const, 0}, + {"EM_STM8", Const, 11}, + {"EM_STXP7X", Const, 11}, + {"EM_SVX", Const, 11}, + {"EM_TILE64", Const, 11}, + {"EM_TILEGX", Const, 11}, + {"EM_TILEPRO", Const, 11}, + {"EM_TINYJ", Const, 0}, + {"EM_TI_ARP32", Const, 11}, + {"EM_TI_C2000", Const, 11}, + {"EM_TI_C5500", Const, 11}, + {"EM_TI_C6000", Const, 11}, + {"EM_TI_PRU", Const, 11}, + {"EM_TMM_GPP", Const, 11}, + {"EM_TPC", Const, 11}, + {"EM_TRICORE", Const, 0}, + {"EM_TRIMEDIA", Const, 11}, + {"EM_TSK3000", Const, 11}, + {"EM_UNICORE", Const, 11}, + {"EM_V800", Const, 0}, + {"EM_V850", Const, 11}, + {"EM_VAX", Const, 11}, + {"EM_VIDEOCORE", Const, 11}, + {"EM_VIDEOCORE3", Const, 11}, + {"EM_VIDEOCORE5", Const, 11}, + {"EM_VISIUM", Const, 11}, + {"EM_VPP500", Const, 0}, + {"EM_X86_64", Const, 0}, + {"EM_XCORE", Const, 11}, + {"EM_XGATE", Const, 11}, + {"EM_XIMO16", Const, 11}, + {"EM_XTENSA", Const, 11}, + {"EM_Z80", Const, 11}, + {"EM_ZSP", Const, 11}, + {"ET_CORE", Const, 0}, + {"ET_DYN", Const, 0}, + {"ET_EXEC", Const, 0}, + {"ET_HIOS", Const, 0}, + {"ET_HIPROC", Const, 0}, + {"ET_LOOS", Const, 0}, + {"ET_LOPROC", Const, 0}, + {"ET_NONE", Const, 0}, + {"ET_REL", Const, 0}, + {"EV_CURRENT", Const, 0}, + {"EV_NONE", Const, 0}, + {"ErrNoSymbols", Var, 4}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"File.Progs", Field, 0}, + {"File.Sections", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.ABIVersion", Field, 0}, + {"FileHeader.ByteOrder", Field, 0}, + {"FileHeader.Class", Field, 0}, + {"FileHeader.Data", Field, 0}, + {"FileHeader.Entry", Field, 1}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.OSABI", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FileHeader.Version", Field, 0}, + {"FormatError", Type, 0}, + {"Header32", Type, 0}, + {"Header32.Ehsize", Field, 0}, + {"Header32.Entry", Field, 0}, + {"Header32.Flags", Field, 0}, + {"Header32.Ident", Field, 0}, + {"Header32.Machine", Field, 0}, + {"Header32.Phentsize", Field, 0}, + {"Header32.Phnum", Field, 0}, + {"Header32.Phoff", Field, 0}, + {"Header32.Shentsize", Field, 0}, + {"Header32.Shnum", Field, 0}, + {"Header32.Shoff", Field, 0}, + {"Header32.Shstrndx", Field, 0}, + {"Header32.Type", Field, 0}, + {"Header32.Version", Field, 0}, + {"Header64", Type, 0}, + {"Header64.Ehsize", Field, 0}, + {"Header64.Entry", Field, 0}, + {"Header64.Flags", Field, 0}, + 
{"Header64.Ident", Field, 0}, + {"Header64.Machine", Field, 0}, + {"Header64.Phentsize", Field, 0}, + {"Header64.Phnum", Field, 0}, + {"Header64.Phoff", Field, 0}, + {"Header64.Shentsize", Field, 0}, + {"Header64.Shnum", Field, 0}, + {"Header64.Shoff", Field, 0}, + {"Header64.Shstrndx", Field, 0}, + {"Header64.Type", Field, 0}, + {"Header64.Version", Field, 0}, + {"ImportedSymbol", Type, 0}, + {"ImportedSymbol.Library", Field, 0}, + {"ImportedSymbol.Name", Field, 0}, + {"ImportedSymbol.Version", Field, 0}, + {"Machine", Type, 0}, + {"NT_FPREGSET", Const, 0}, + {"NT_PRPSINFO", Const, 0}, + {"NT_PRSTATUS", Const, 0}, + {"NType", Type, 0}, + {"NewFile", Func, 0}, + {"OSABI", Type, 0}, + {"Open", Func, 0}, + {"PF_MASKOS", Const, 0}, + {"PF_MASKPROC", Const, 0}, + {"PF_R", Const, 0}, + {"PF_W", Const, 0}, + {"PF_X", Const, 0}, + {"PT_AARCH64_ARCHEXT", Const, 16}, + {"PT_AARCH64_UNWIND", Const, 16}, + {"PT_ARM_ARCHEXT", Const, 16}, + {"PT_ARM_EXIDX", Const, 16}, + {"PT_DYNAMIC", Const, 0}, + {"PT_GNU_EH_FRAME", Const, 16}, + {"PT_GNU_MBIND_HI", Const, 16}, + {"PT_GNU_MBIND_LO", Const, 16}, + {"PT_GNU_PROPERTY", Const, 16}, + {"PT_GNU_RELRO", Const, 16}, + {"PT_GNU_STACK", Const, 16}, + {"PT_HIOS", Const, 0}, + {"PT_HIPROC", Const, 0}, + {"PT_INTERP", Const, 0}, + {"PT_LOAD", Const, 0}, + {"PT_LOOS", Const, 0}, + {"PT_LOPROC", Const, 0}, + {"PT_MIPS_ABIFLAGS", Const, 16}, + {"PT_MIPS_OPTIONS", Const, 16}, + {"PT_MIPS_REGINFO", Const, 16}, + {"PT_MIPS_RTPROC", Const, 16}, + {"PT_NOTE", Const, 0}, + {"PT_NULL", Const, 0}, + {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_RANDOMIZE", Const, 16}, + {"PT_OPENBSD_WXNEEDED", Const, 16}, + {"PT_PAX_FLAGS", Const, 16}, + {"PT_PHDR", Const, 0}, + {"PT_S390_PGSTE", Const, 16}, + {"PT_SHLIB", Const, 0}, + {"PT_SUNWSTACK", Const, 16}, + {"PT_SUNW_EH_FRAME", Const, 16}, + {"PT_TLS", Const, 0}, + {"Prog", Type, 0}, + {"Prog.ProgHeader", Field, 0}, + {"Prog.ReaderAt", Field, 0}, + {"Prog32", Type, 0}, + {"Prog32.Align", Field, 0}, + {"Prog32.Filesz", Field, 0}, + {"Prog32.Flags", Field, 0}, + {"Prog32.Memsz", Field, 0}, + {"Prog32.Off", Field, 0}, + {"Prog32.Paddr", Field, 0}, + {"Prog32.Type", Field, 0}, + {"Prog32.Vaddr", Field, 0}, + {"Prog64", Type, 0}, + {"Prog64.Align", Field, 0}, + {"Prog64.Filesz", Field, 0}, + {"Prog64.Flags", Field, 0}, + {"Prog64.Memsz", Field, 0}, + {"Prog64.Off", Field, 0}, + {"Prog64.Paddr", Field, 0}, + {"Prog64.Type", Field, 0}, + {"Prog64.Vaddr", Field, 0}, + {"ProgFlag", Type, 0}, + {"ProgHeader", Type, 0}, + {"ProgHeader.Align", Field, 0}, + {"ProgHeader.Filesz", Field, 0}, + {"ProgHeader.Flags", Field, 0}, + {"ProgHeader.Memsz", Field, 0}, + {"ProgHeader.Off", Field, 0}, + {"ProgHeader.Paddr", Field, 0}, + {"ProgHeader.Type", Field, 0}, + {"ProgHeader.Vaddr", Field, 0}, + {"ProgType", Type, 0}, + {"R_386", Type, 0}, + {"R_386_16", Const, 10}, + {"R_386_32", Const, 0}, + {"R_386_32PLT", Const, 10}, + {"R_386_8", Const, 10}, + {"R_386_COPY", Const, 0}, + {"R_386_GLOB_DAT", Const, 0}, + {"R_386_GOT32", Const, 0}, + {"R_386_GOT32X", Const, 10}, + {"R_386_GOTOFF", Const, 0}, + {"R_386_GOTPC", Const, 0}, + {"R_386_IRELATIVE", Const, 10}, + {"R_386_JMP_SLOT", Const, 0}, + {"R_386_NONE", Const, 0}, + {"R_386_PC16", Const, 10}, + {"R_386_PC32", Const, 0}, + {"R_386_PC8", Const, 10}, + {"R_386_PLT32", Const, 0}, + {"R_386_RELATIVE", Const, 0}, + {"R_386_SIZE32", Const, 10}, + {"R_386_TLS_DESC", Const, 10}, + {"R_386_TLS_DESC_CALL", Const, 10}, + {"R_386_TLS_DTPMOD32", Const, 0}, + {"R_386_TLS_DTPOFF32", Const, 0}, + {"R_386_TLS_GD", 
Const, 0}, + {"R_386_TLS_GD_32", Const, 0}, + {"R_386_TLS_GD_CALL", Const, 0}, + {"R_386_TLS_GD_POP", Const, 0}, + {"R_386_TLS_GD_PUSH", Const, 0}, + {"R_386_TLS_GOTDESC", Const, 10}, + {"R_386_TLS_GOTIE", Const, 0}, + {"R_386_TLS_IE", Const, 0}, + {"R_386_TLS_IE_32", Const, 0}, + {"R_386_TLS_LDM", Const, 0}, + {"R_386_TLS_LDM_32", Const, 0}, + {"R_386_TLS_LDM_CALL", Const, 0}, + {"R_386_TLS_LDM_POP", Const, 0}, + {"R_386_TLS_LDM_PUSH", Const, 0}, + {"R_386_TLS_LDO_32", Const, 0}, + {"R_386_TLS_LE", Const, 0}, + {"R_386_TLS_LE_32", Const, 0}, + {"R_386_TLS_TPOFF", Const, 0}, + {"R_386_TLS_TPOFF32", Const, 0}, + {"R_390", Type, 7}, + {"R_390_12", Const, 7}, + {"R_390_16", Const, 7}, + {"R_390_20", Const, 7}, + {"R_390_32", Const, 7}, + {"R_390_64", Const, 7}, + {"R_390_8", Const, 7}, + {"R_390_COPY", Const, 7}, + {"R_390_GLOB_DAT", Const, 7}, + {"R_390_GOT12", Const, 7}, + {"R_390_GOT16", Const, 7}, + {"R_390_GOT20", Const, 7}, + {"R_390_GOT32", Const, 7}, + {"R_390_GOT64", Const, 7}, + {"R_390_GOTENT", Const, 7}, + {"R_390_GOTOFF", Const, 7}, + {"R_390_GOTOFF16", Const, 7}, + {"R_390_GOTOFF64", Const, 7}, + {"R_390_GOTPC", Const, 7}, + {"R_390_GOTPCDBL", Const, 7}, + {"R_390_GOTPLT12", Const, 7}, + {"R_390_GOTPLT16", Const, 7}, + {"R_390_GOTPLT20", Const, 7}, + {"R_390_GOTPLT32", Const, 7}, + {"R_390_GOTPLT64", Const, 7}, + {"R_390_GOTPLTENT", Const, 7}, + {"R_390_GOTPLTOFF16", Const, 7}, + {"R_390_GOTPLTOFF32", Const, 7}, + {"R_390_GOTPLTOFF64", Const, 7}, + {"R_390_JMP_SLOT", Const, 7}, + {"R_390_NONE", Const, 7}, + {"R_390_PC16", Const, 7}, + {"R_390_PC16DBL", Const, 7}, + {"R_390_PC32", Const, 7}, + {"R_390_PC32DBL", Const, 7}, + {"R_390_PC64", Const, 7}, + {"R_390_PLT16DBL", Const, 7}, + {"R_390_PLT32", Const, 7}, + {"R_390_PLT32DBL", Const, 7}, + {"R_390_PLT64", Const, 7}, + {"R_390_RELATIVE", Const, 7}, + {"R_390_TLS_DTPMOD", Const, 7}, + {"R_390_TLS_DTPOFF", Const, 7}, + {"R_390_TLS_GD32", Const, 7}, + {"R_390_TLS_GD64", Const, 7}, + {"R_390_TLS_GDCALL", Const, 7}, + {"R_390_TLS_GOTIE12", Const, 7}, + {"R_390_TLS_GOTIE20", Const, 7}, + {"R_390_TLS_GOTIE32", Const, 7}, + {"R_390_TLS_GOTIE64", Const, 7}, + {"R_390_TLS_IE32", Const, 7}, + {"R_390_TLS_IE64", Const, 7}, + {"R_390_TLS_IEENT", Const, 7}, + {"R_390_TLS_LDCALL", Const, 7}, + {"R_390_TLS_LDM32", Const, 7}, + {"R_390_TLS_LDM64", Const, 7}, + {"R_390_TLS_LDO32", Const, 7}, + {"R_390_TLS_LDO64", Const, 7}, + {"R_390_TLS_LE32", Const, 7}, + {"R_390_TLS_LE64", Const, 7}, + {"R_390_TLS_LOAD", Const, 7}, + {"R_390_TLS_TPOFF", Const, 7}, + {"R_AARCH64", Type, 4}, + {"R_AARCH64_ABS16", Const, 4}, + {"R_AARCH64_ABS32", Const, 4}, + {"R_AARCH64_ABS64", Const, 4}, + {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4}, + {"R_AARCH64_CALL26", Const, 4}, + {"R_AARCH64_CONDBR19", Const, 4}, + {"R_AARCH64_COPY", Const, 4}, + {"R_AARCH64_GLOB_DAT", Const, 4}, + {"R_AARCH64_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_IRELATIVE", Const, 4}, + {"R_AARCH64_JUMP26", Const, 4}, + {"R_AARCH64_JUMP_SLOT", Const, 4}, + {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10}, + {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10}, + {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4}, + 
{"R_AARCH64_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_MOVW_SABS_G1", Const, 4}, + {"R_AARCH64_MOVW_SABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G3", Const, 4}, + {"R_AARCH64_NONE", Const, 4}, + {"R_AARCH64_NULL", Const, 4}, + {"R_AARCH64_P32_ABS16", Const, 4}, + {"R_AARCH64_P32_ABS32", Const, 4}, + {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_P32_CALL26", Const, 4}, + {"R_AARCH64_P32_CONDBR19", Const, 4}, + {"R_AARCH64_P32_COPY", Const, 4}, + {"R_AARCH64_P32_GLOB_DAT", Const, 4}, + {"R_AARCH64_P32_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_IRELATIVE", Const, 4}, + {"R_AARCH64_P32_JUMP26", Const, 4}, + {"R_AARCH64_P32_JUMP_SLOT", Const, 4}, + {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_P32_PREL16", Const, 4}, + {"R_AARCH64_P32_PREL32", Const, 4}, + {"R_AARCH64_P32_RELATIVE", Const, 4}, + {"R_AARCH64_P32_TLSDESC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_P32_TLS_DTPMOD", Const, 4}, + {"R_AARCH64_P32_TLS_DTPREL", Const, 4}, + {"R_AARCH64_P32_TLS_TPREL", Const, 4}, + {"R_AARCH64_P32_TSTBR14", Const, 4}, + {"R_AARCH64_PREL16", Const, 4}, + {"R_AARCH64_PREL32", Const, 4}, + {"R_AARCH64_PREL64", Const, 4}, + {"R_AARCH64_RELATIVE", Const, 4}, + {"R_AARCH64_TLSDESC", Const, 4}, + {"R_AARCH64_TLSDESC_ADD", Const, 4}, + {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_LDR", Const, 4}, + {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G1", Const, 4}, + {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10}, + 
{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10}, + {"R_AARCH64_TLSGD_MOVW_G1", Const, 10}, + {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4}, + {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10}, + {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4}, + {"R_AARCH64_TLS_DTPMOD64", Const, 4}, + {"R_AARCH64_TLS_DTPREL64", Const, 4}, + {"R_AARCH64_TLS_TPREL64", Const, 4}, + {"R_AARCH64_TSTBR14", Const, 4}, + {"R_ALPHA", Type, 0}, + {"R_ALPHA_BRADDR", Const, 0}, + {"R_ALPHA_COPY", Const, 0}, + {"R_ALPHA_GLOB_DAT", Const, 0}, + {"R_ALPHA_GPDISP", Const, 0}, + {"R_ALPHA_GPREL32", Const, 0}, + {"R_ALPHA_GPRELHIGH", Const, 0}, + {"R_ALPHA_GPRELLOW", Const, 0}, + {"R_ALPHA_GPVALUE", Const, 0}, + {"R_ALPHA_HINT", Const, 0}, + {"R_ALPHA_IMMED_BR_HI32", Const, 0}, + {"R_ALPHA_IMMED_GP_16", Const, 0}, + {"R_ALPHA_IMMED_GP_HI32", Const, 0}, + {"R_ALPHA_IMMED_LO32", Const, 0}, + {"R_ALPHA_IMMED_SCN_HI32", Const, 0}, + {"R_ALPHA_JMP_SLOT", Const, 0}, + {"R_ALPHA_LITERAL", Const, 0}, + {"R_ALPHA_LITUSE", Const, 0}, + {"R_ALPHA_NONE", Const, 0}, + {"R_ALPHA_OP_PRSHIFT", Const, 0}, + {"R_ALPHA_OP_PSUB", Const, 0}, + {"R_ALPHA_OP_PUSH", Const, 0}, + {"R_ALPHA_OP_STORE", Const, 0}, + {"R_ALPHA_REFLONG", Const, 0}, + {"R_ALPHA_REFQUAD", Const, 0}, + {"R_ALPHA_RELATIVE", Const, 0}, + {"R_ALPHA_SREL16", Const, 0}, + {"R_ALPHA_SREL32", Const, 0}, + {"R_ALPHA_SREL64", Const, 0}, + {"R_ARM", Type, 0}, + {"R_ARM_ABS12", Const, 0}, + {"R_ARM_ABS16", Const, 0}, + {"R_ARM_ABS32", Const, 0}, + {"R_ARM_ABS32_NOI", Const, 10}, + {"R_ARM_ABS8", Const, 0}, + {"R_ARM_ALU_PCREL_15_8", Const, 10}, + {"R_ARM_ALU_PCREL_23_15", Const, 10}, + {"R_ARM_ALU_PCREL_7_0", Const, 10}, + {"R_ARM_ALU_PC_G0", Const, 10}, + {"R_ARM_ALU_PC_G0_NC", Const, 10}, + {"R_ARM_ALU_PC_G1", Const, 10}, + {"R_ARM_ALU_PC_G1_NC", Const, 10}, + {"R_ARM_ALU_PC_G2", Const, 10}, + {"R_ARM_ALU_SBREL_19_12_NC", Const, 10}, + {"R_ARM_ALU_SBREL_27_20_CK", Const, 10}, + {"R_ARM_ALU_SB_G0", Const, 10}, + {"R_ARM_ALU_SB_G0_NC", Const, 10}, + {"R_ARM_ALU_SB_G1", Const, 10}, + {"R_ARM_ALU_SB_G1_NC", Const, 10}, + {"R_ARM_ALU_SB_G2", Const, 10}, + {"R_ARM_AMP_VCALL9", Const, 0}, + {"R_ARM_BASE_ABS", Const, 10}, + {"R_ARM_CALL", Const, 10}, + {"R_ARM_COPY", Const, 0}, + {"R_ARM_GLOB_DAT", Const, 0}, + {"R_ARM_GNU_VTENTRY", Const, 0}, + {"R_ARM_GNU_VTINHERIT", Const, 0}, + {"R_ARM_GOT32", Const, 0}, + {"R_ARM_GOTOFF", Const, 0}, + {"R_ARM_GOTOFF12", Const, 10}, + {"R_ARM_GOTPC", Const, 0}, + {"R_ARM_GOTRELAX", Const, 10}, + {"R_ARM_GOT_ABS", Const, 10}, + {"R_ARM_GOT_BREL12", Const, 10}, + {"R_ARM_GOT_PREL", Const, 10}, + {"R_ARM_IRELATIVE", Const, 10}, + {"R_ARM_JUMP24", Const, 10}, + {"R_ARM_JUMP_SLOT", Const, 0}, + {"R_ARM_LDC_PC_G0", Const, 10}, + {"R_ARM_LDC_PC_G1", Const, 10}, + 
{"R_ARM_LDC_PC_G2", Const, 10}, + {"R_ARM_LDC_SB_G0", Const, 10}, + {"R_ARM_LDC_SB_G1", Const, 10}, + {"R_ARM_LDC_SB_G2", Const, 10}, + {"R_ARM_LDRS_PC_G0", Const, 10}, + {"R_ARM_LDRS_PC_G1", Const, 10}, + {"R_ARM_LDRS_PC_G2", Const, 10}, + {"R_ARM_LDRS_SB_G0", Const, 10}, + {"R_ARM_LDRS_SB_G1", Const, 10}, + {"R_ARM_LDRS_SB_G2", Const, 10}, + {"R_ARM_LDR_PC_G1", Const, 10}, + {"R_ARM_LDR_PC_G2", Const, 10}, + {"R_ARM_LDR_SBREL_11_10_NC", Const, 10}, + {"R_ARM_LDR_SB_G0", Const, 10}, + {"R_ARM_LDR_SB_G1", Const, 10}, + {"R_ARM_LDR_SB_G2", Const, 10}, + {"R_ARM_ME_TOO", Const, 10}, + {"R_ARM_MOVT_ABS", Const, 10}, + {"R_ARM_MOVT_BREL", Const, 10}, + {"R_ARM_MOVT_PREL", Const, 10}, + {"R_ARM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_MOVW_BREL", Const, 10}, + {"R_ARM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_NONE", Const, 0}, + {"R_ARM_PC13", Const, 0}, + {"R_ARM_PC24", Const, 0}, + {"R_ARM_PLT32", Const, 0}, + {"R_ARM_PLT32_ABS", Const, 10}, + {"R_ARM_PREL31", Const, 10}, + {"R_ARM_PRIVATE_0", Const, 10}, + {"R_ARM_PRIVATE_1", Const, 10}, + {"R_ARM_PRIVATE_10", Const, 10}, + {"R_ARM_PRIVATE_11", Const, 10}, + {"R_ARM_PRIVATE_12", Const, 10}, + {"R_ARM_PRIVATE_13", Const, 10}, + {"R_ARM_PRIVATE_14", Const, 10}, + {"R_ARM_PRIVATE_15", Const, 10}, + {"R_ARM_PRIVATE_2", Const, 10}, + {"R_ARM_PRIVATE_3", Const, 10}, + {"R_ARM_PRIVATE_4", Const, 10}, + {"R_ARM_PRIVATE_5", Const, 10}, + {"R_ARM_PRIVATE_6", Const, 10}, + {"R_ARM_PRIVATE_7", Const, 10}, + {"R_ARM_PRIVATE_8", Const, 10}, + {"R_ARM_PRIVATE_9", Const, 10}, + {"R_ARM_RABS32", Const, 0}, + {"R_ARM_RBASE", Const, 0}, + {"R_ARM_REL32", Const, 0}, + {"R_ARM_REL32_NOI", Const, 10}, + {"R_ARM_RELATIVE", Const, 0}, + {"R_ARM_RPC24", Const, 0}, + {"R_ARM_RREL32", Const, 0}, + {"R_ARM_RSBREL32", Const, 0}, + {"R_ARM_RXPC25", Const, 10}, + {"R_ARM_SBREL31", Const, 10}, + {"R_ARM_SBREL32", Const, 0}, + {"R_ARM_SWI24", Const, 0}, + {"R_ARM_TARGET1", Const, 10}, + {"R_ARM_TARGET2", Const, 10}, + {"R_ARM_THM_ABS5", Const, 0}, + {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G1_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G3", Const, 10}, + {"R_ARM_THM_ALU_PREL_11_0", Const, 10}, + {"R_ARM_THM_GOT_BREL12", Const, 10}, + {"R_ARM_THM_JUMP11", Const, 10}, + {"R_ARM_THM_JUMP19", Const, 10}, + {"R_ARM_THM_JUMP24", Const, 10}, + {"R_ARM_THM_JUMP6", Const, 10}, + {"R_ARM_THM_JUMP8", Const, 10}, + {"R_ARM_THM_MOVT_ABS", Const, 10}, + {"R_ARM_THM_MOVT_BREL", Const, 10}, + {"R_ARM_THM_MOVT_PREL", Const, 10}, + {"R_ARM_THM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_THM_MOVW_BREL", Const, 10}, + {"R_ARM_THM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_THM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_THM_PC12", Const, 10}, + {"R_ARM_THM_PC22", Const, 0}, + {"R_ARM_THM_PC8", Const, 0}, + {"R_ARM_THM_RPC22", Const, 0}, + {"R_ARM_THM_SWI8", Const, 0}, + {"R_ARM_THM_TLS_CALL", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ16", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ32", Const, 10}, + {"R_ARM_THM_XPC22", Const, 0}, + {"R_ARM_TLS_CALL", Const, 10}, + {"R_ARM_TLS_DESCSEQ", Const, 10}, + {"R_ARM_TLS_DTPMOD32", Const, 10}, + {"R_ARM_TLS_DTPOFF32", Const, 10}, + {"R_ARM_TLS_GD32", Const, 10}, + {"R_ARM_TLS_GOTDESC", Const, 10}, + {"R_ARM_TLS_IE12GP", Const, 10}, + {"R_ARM_TLS_IE32", Const, 10}, + {"R_ARM_TLS_LDM32", Const, 10}, + {"R_ARM_TLS_LDO12", Const, 10}, + {"R_ARM_TLS_LDO32", Const, 10}, + {"R_ARM_TLS_LE12", Const, 10}, + {"R_ARM_TLS_LE32", Const, 10}, + {"R_ARM_TLS_TPOFF32", Const, 10}, + {"R_ARM_V4BX", Const, 10}, + 
{"R_ARM_XPC25", Const, 0}, + {"R_INFO", Func, 0}, + {"R_INFO32", Func, 0}, + {"R_LARCH", Type, 19}, + {"R_LARCH_32", Const, 19}, + {"R_LARCH_32_PCREL", Const, 20}, + {"R_LARCH_64", Const, 19}, + {"R_LARCH_64_PCREL", Const, 22}, + {"R_LARCH_ABS64_HI12", Const, 20}, + {"R_LARCH_ABS64_LO20", Const, 20}, + {"R_LARCH_ABS_HI20", Const, 20}, + {"R_LARCH_ABS_LO12", Const, 20}, + {"R_LARCH_ADD16", Const, 19}, + {"R_LARCH_ADD24", Const, 19}, + {"R_LARCH_ADD32", Const, 19}, + {"R_LARCH_ADD6", Const, 22}, + {"R_LARCH_ADD64", Const, 19}, + {"R_LARCH_ADD8", Const, 19}, + {"R_LARCH_ADD_ULEB128", Const, 22}, + {"R_LARCH_ALIGN", Const, 22}, + {"R_LARCH_B16", Const, 20}, + {"R_LARCH_B21", Const, 20}, + {"R_LARCH_B26", Const, 20}, + {"R_LARCH_CFA", Const, 22}, + {"R_LARCH_COPY", Const, 19}, + {"R_LARCH_DELETE", Const, 22}, + {"R_LARCH_GNU_VTENTRY", Const, 20}, + {"R_LARCH_GNU_VTINHERIT", Const, 20}, + {"R_LARCH_GOT64_HI12", Const, 20}, + {"R_LARCH_GOT64_LO20", Const, 20}, + {"R_LARCH_GOT64_PC_HI12", Const, 20}, + {"R_LARCH_GOT64_PC_LO20", Const, 20}, + {"R_LARCH_GOT_HI20", Const, 20}, + {"R_LARCH_GOT_LO12", Const, 20}, + {"R_LARCH_GOT_PC_HI20", Const, 20}, + {"R_LARCH_GOT_PC_LO12", Const, 20}, + {"R_LARCH_IRELATIVE", Const, 19}, + {"R_LARCH_JUMP_SLOT", Const, 19}, + {"R_LARCH_MARK_LA", Const, 19}, + {"R_LARCH_MARK_PCREL", Const, 19}, + {"R_LARCH_NONE", Const, 19}, + {"R_LARCH_PCALA64_HI12", Const, 20}, + {"R_LARCH_PCALA64_LO20", Const, 20}, + {"R_LARCH_PCALA_HI20", Const, 20}, + {"R_LARCH_PCALA_LO12", Const, 20}, + {"R_LARCH_PCREL20_S2", Const, 22}, + {"R_LARCH_RELATIVE", Const, 19}, + {"R_LARCH_RELAX", Const, 20}, + {"R_LARCH_SOP_ADD", Const, 19}, + {"R_LARCH_SOP_AND", Const, 19}, + {"R_LARCH_SOP_ASSERT", Const, 19}, + {"R_LARCH_SOP_IF_ELSE", Const, 19}, + {"R_LARCH_SOP_NOT", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_12", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_5", Const, 19}, + {"R_LARCH_SOP_POP_32_S_5_20", Const, 19}, + {"R_LARCH_SOP_POP_32_U", Const, 19}, + {"R_LARCH_SOP_POP_32_U_10_12", Const, 19}, + {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19}, + {"R_LARCH_SOP_PUSH_DUP", Const, 19}, + {"R_LARCH_SOP_PUSH_GPREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19}, + {"R_LARCH_SOP_SL", Const, 19}, + {"R_LARCH_SOP_SR", Const, 19}, + {"R_LARCH_SOP_SUB", Const, 19}, + {"R_LARCH_SUB16", Const, 19}, + {"R_LARCH_SUB24", Const, 19}, + {"R_LARCH_SUB32", Const, 19}, + {"R_LARCH_SUB6", Const, 22}, + {"R_LARCH_SUB64", Const, 19}, + {"R_LARCH_SUB8", Const, 19}, + {"R_LARCH_SUB_ULEB128", Const, 22}, + {"R_LARCH_TLS_DTPMOD32", Const, 19}, + {"R_LARCH_TLS_DTPMOD64", Const, 19}, + {"R_LARCH_TLS_DTPREL32", Const, 19}, + {"R_LARCH_TLS_DTPREL64", Const, 19}, + {"R_LARCH_TLS_GD_HI20", Const, 20}, + {"R_LARCH_TLS_GD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE64_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_LO20", Const, 20}, + {"R_LARCH_TLS_IE64_PC_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_PC_LO20", Const, 20}, + {"R_LARCH_TLS_IE_HI20", Const, 20}, + {"R_LARCH_TLS_IE_LO12", Const, 20}, + {"R_LARCH_TLS_IE_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE_PC_LO12", Const, 20}, + {"R_LARCH_TLS_LD_HI20", Const, 20}, + {"R_LARCH_TLS_LD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_LE64_HI12", 
Const, 20}, + {"R_LARCH_TLS_LE64_LO20", Const, 20}, + {"R_LARCH_TLS_LE_HI20", Const, 20}, + {"R_LARCH_TLS_LE_LO12", Const, 20}, + {"R_LARCH_TLS_TPREL32", Const, 19}, + {"R_LARCH_TLS_TPREL64", Const, 19}, + {"R_MIPS", Type, 6}, + {"R_MIPS_16", Const, 6}, + {"R_MIPS_26", Const, 6}, + {"R_MIPS_32", Const, 6}, + {"R_MIPS_64", Const, 6}, + {"R_MIPS_ADD_IMMEDIATE", Const, 6}, + {"R_MIPS_CALL16", Const, 6}, + {"R_MIPS_CALL_HI16", Const, 6}, + {"R_MIPS_CALL_LO16", Const, 6}, + {"R_MIPS_DELETE", Const, 6}, + {"R_MIPS_GOT16", Const, 6}, + {"R_MIPS_GOT_DISP", Const, 6}, + {"R_MIPS_GOT_HI16", Const, 6}, + {"R_MIPS_GOT_LO16", Const, 6}, + {"R_MIPS_GOT_OFST", Const, 6}, + {"R_MIPS_GOT_PAGE", Const, 6}, + {"R_MIPS_GPREL16", Const, 6}, + {"R_MIPS_GPREL32", Const, 6}, + {"R_MIPS_HI16", Const, 6}, + {"R_MIPS_HIGHER", Const, 6}, + {"R_MIPS_HIGHEST", Const, 6}, + {"R_MIPS_INSERT_A", Const, 6}, + {"R_MIPS_INSERT_B", Const, 6}, + {"R_MIPS_JALR", Const, 6}, + {"R_MIPS_LITERAL", Const, 6}, + {"R_MIPS_LO16", Const, 6}, + {"R_MIPS_NONE", Const, 6}, + {"R_MIPS_PC16", Const, 6}, + {"R_MIPS_PC32", Const, 22}, + {"R_MIPS_PJUMP", Const, 6}, + {"R_MIPS_REL16", Const, 6}, + {"R_MIPS_REL32", Const, 6}, + {"R_MIPS_RELGOT", Const, 6}, + {"R_MIPS_SCN_DISP", Const, 6}, + {"R_MIPS_SHIFT5", Const, 6}, + {"R_MIPS_SHIFT6", Const, 6}, + {"R_MIPS_SUB", Const, 6}, + {"R_MIPS_TLS_DTPMOD32", Const, 6}, + {"R_MIPS_TLS_DTPMOD64", Const, 6}, + {"R_MIPS_TLS_DTPREL32", Const, 6}, + {"R_MIPS_TLS_DTPREL64", Const, 6}, + {"R_MIPS_TLS_DTPREL_HI16", Const, 6}, + {"R_MIPS_TLS_DTPREL_LO16", Const, 6}, + {"R_MIPS_TLS_GD", Const, 6}, + {"R_MIPS_TLS_GOTTPREL", Const, 6}, + {"R_MIPS_TLS_LDM", Const, 6}, + {"R_MIPS_TLS_TPREL32", Const, 6}, + {"R_MIPS_TLS_TPREL64", Const, 6}, + {"R_MIPS_TLS_TPREL_HI16", Const, 6}, + {"R_MIPS_TLS_TPREL_LO16", Const, 6}, + {"R_PPC", Type, 0}, + {"R_PPC64", Type, 5}, + {"R_PPC64_ADDR14", Const, 5}, + {"R_PPC64_ADDR14_BRNTAKEN", Const, 5}, + {"R_PPC64_ADDR14_BRTAKEN", Const, 5}, + {"R_PPC64_ADDR16", Const, 5}, + {"R_PPC64_ADDR16_DS", Const, 5}, + {"R_PPC64_ADDR16_HA", Const, 5}, + {"R_PPC64_ADDR16_HI", Const, 5}, + {"R_PPC64_ADDR16_HIGH", Const, 10}, + {"R_PPC64_ADDR16_HIGHA", Const, 10}, + {"R_PPC64_ADDR16_HIGHER", Const, 5}, + {"R_PPC64_ADDR16_HIGHER34", Const, 20}, + {"R_PPC64_ADDR16_HIGHERA", Const, 5}, + {"R_PPC64_ADDR16_HIGHERA34", Const, 20}, + {"R_PPC64_ADDR16_HIGHEST", Const, 5}, + {"R_PPC64_ADDR16_HIGHEST34", Const, 20}, + {"R_PPC64_ADDR16_HIGHESTA", Const, 5}, + {"R_PPC64_ADDR16_HIGHESTA34", Const, 20}, + {"R_PPC64_ADDR16_LO", Const, 5}, + {"R_PPC64_ADDR16_LO_DS", Const, 5}, + {"R_PPC64_ADDR24", Const, 5}, + {"R_PPC64_ADDR32", Const, 5}, + {"R_PPC64_ADDR64", Const, 5}, + {"R_PPC64_ADDR64_LOCAL", Const, 10}, + {"R_PPC64_COPY", Const, 20}, + {"R_PPC64_D28", Const, 20}, + {"R_PPC64_D34", Const, 20}, + {"R_PPC64_D34_HA30", Const, 20}, + {"R_PPC64_D34_HI30", Const, 20}, + {"R_PPC64_D34_LO", Const, 20}, + {"R_PPC64_DTPMOD64", Const, 5}, + {"R_PPC64_DTPREL16", Const, 5}, + {"R_PPC64_DTPREL16_DS", Const, 5}, + {"R_PPC64_DTPREL16_HA", Const, 5}, + {"R_PPC64_DTPREL16_HI", Const, 5}, + {"R_PPC64_DTPREL16_HIGH", Const, 10}, + {"R_PPC64_DTPREL16_HIGHA", Const, 10}, + {"R_PPC64_DTPREL16_HIGHER", Const, 5}, + {"R_PPC64_DTPREL16_HIGHERA", Const, 5}, + {"R_PPC64_DTPREL16_HIGHEST", Const, 5}, + {"R_PPC64_DTPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_DTPREL16_LO", Const, 5}, + {"R_PPC64_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_DTPREL34", Const, 20}, + {"R_PPC64_DTPREL64", Const, 5}, + {"R_PPC64_ENTRY", Const, 10}, + 
{"R_PPC64_GLOB_DAT", Const, 20}, + {"R_PPC64_GNU_VTENTRY", Const, 20}, + {"R_PPC64_GNU_VTINHERIT", Const, 20}, + {"R_PPC64_GOT16", Const, 5}, + {"R_PPC64_GOT16_DS", Const, 5}, + {"R_PPC64_GOT16_HA", Const, 5}, + {"R_PPC64_GOT16_HI", Const, 5}, + {"R_PPC64_GOT16_LO", Const, 5}, + {"R_PPC64_GOT16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HA", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HI", Const, 5}, + {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20}, + {"R_PPC64_GOT_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSGD16", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSGD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSLD16", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSLD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TPREL16_DS", Const, 5}, + {"R_PPC64_GOT_TPREL16_HA", Const, 5}, + {"R_PPC64_GOT_TPREL16_HI", Const, 5}, + {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_TPREL_PCREL34", Const, 20}, + {"R_PPC64_IRELATIVE", Const, 10}, + {"R_PPC64_JMP_IREL", Const, 10}, + {"R_PPC64_JMP_SLOT", Const, 5}, + {"R_PPC64_NONE", Const, 5}, + {"R_PPC64_PCREL28", Const, 20}, + {"R_PPC64_PCREL34", Const, 20}, + {"R_PPC64_PCREL_OPT", Const, 20}, + {"R_PPC64_PLT16_HA", Const, 20}, + {"R_PPC64_PLT16_HI", Const, 20}, + {"R_PPC64_PLT16_LO", Const, 20}, + {"R_PPC64_PLT16_LO_DS", Const, 10}, + {"R_PPC64_PLT32", Const, 20}, + {"R_PPC64_PLT64", Const, 20}, + {"R_PPC64_PLTCALL", Const, 20}, + {"R_PPC64_PLTCALL_NOTOC", Const, 20}, + {"R_PPC64_PLTGOT16", Const, 10}, + {"R_PPC64_PLTGOT16_DS", Const, 10}, + {"R_PPC64_PLTGOT16_HA", Const, 10}, + {"R_PPC64_PLTGOT16_HI", Const, 10}, + {"R_PPC64_PLTGOT16_LO", Const, 10}, + {"R_PPC64_PLTGOT_LO_DS", Const, 10}, + {"R_PPC64_PLTREL32", Const, 20}, + {"R_PPC64_PLTREL64", Const, 20}, + {"R_PPC64_PLTSEQ", Const, 20}, + {"R_PPC64_PLTSEQ_NOTOC", Const, 20}, + {"R_PPC64_PLT_PCREL34", Const, 20}, + {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20}, + {"R_PPC64_REL14", Const, 5}, + {"R_PPC64_REL14_BRNTAKEN", Const, 5}, + {"R_PPC64_REL14_BRTAKEN", Const, 5}, + {"R_PPC64_REL16", Const, 5}, + {"R_PPC64_REL16DX_HA", Const, 10}, + {"R_PPC64_REL16_HA", Const, 5}, + {"R_PPC64_REL16_HI", Const, 5}, + {"R_PPC64_REL16_HIGH", Const, 20}, + {"R_PPC64_REL16_HIGHA", Const, 20}, + {"R_PPC64_REL16_HIGHER", Const, 20}, + {"R_PPC64_REL16_HIGHER34", Const, 20}, + {"R_PPC64_REL16_HIGHERA", Const, 20}, + {"R_PPC64_REL16_HIGHERA34", Const, 20}, + {"R_PPC64_REL16_HIGHEST", Const, 20}, + {"R_PPC64_REL16_HIGHEST34", Const, 20}, + {"R_PPC64_REL16_HIGHESTA", Const, 20}, + {"R_PPC64_REL16_HIGHESTA34", Const, 20}, + {"R_PPC64_REL16_LO", Const, 5}, + {"R_PPC64_REL24", Const, 5}, + {"R_PPC64_REL24_NOTOC", Const, 10}, + {"R_PPC64_REL24_P9NOTOC", Const, 21}, + {"R_PPC64_REL30", Const, 20}, + {"R_PPC64_REL32", Const, 5}, + {"R_PPC64_REL64", Const, 5}, + {"R_PPC64_RELATIVE", Const, 18}, + {"R_PPC64_SECTOFF", Const, 20}, + {"R_PPC64_SECTOFF_DS", Const, 10}, + {"R_PPC64_SECTOFF_HA", Const, 20}, + {"R_PPC64_SECTOFF_HI", Const, 20}, + {"R_PPC64_SECTOFF_LO", Const, 20}, + {"R_PPC64_SECTOFF_LO_DS", Const, 10}, + {"R_PPC64_TLS", Const, 5}, + {"R_PPC64_TLSGD", Const, 5}, + {"R_PPC64_TLSLD", Const, 5}, + {"R_PPC64_TOC", Const, 5}, + {"R_PPC64_TOC16", Const, 5}, + {"R_PPC64_TOC16_DS", Const, 5}, + {"R_PPC64_TOC16_HA", Const, 5}, + {"R_PPC64_TOC16_HI", Const, 5}, + 
{"R_PPC64_TOC16_LO", Const, 5}, + {"R_PPC64_TOC16_LO_DS", Const, 5}, + {"R_PPC64_TOCSAVE", Const, 10}, + {"R_PPC64_TPREL16", Const, 5}, + {"R_PPC64_TPREL16_DS", Const, 5}, + {"R_PPC64_TPREL16_HA", Const, 5}, + {"R_PPC64_TPREL16_HI", Const, 5}, + {"R_PPC64_TPREL16_HIGH", Const, 10}, + {"R_PPC64_TPREL16_HIGHA", Const, 10}, + {"R_PPC64_TPREL16_HIGHER", Const, 5}, + {"R_PPC64_TPREL16_HIGHERA", Const, 5}, + {"R_PPC64_TPREL16_HIGHEST", Const, 5}, + {"R_PPC64_TPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_TPREL16_LO", Const, 5}, + {"R_PPC64_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_TPREL34", Const, 20}, + {"R_PPC64_TPREL64", Const, 5}, + {"R_PPC64_UADDR16", Const, 20}, + {"R_PPC64_UADDR32", Const, 20}, + {"R_PPC64_UADDR64", Const, 20}, + {"R_PPC_ADDR14", Const, 0}, + {"R_PPC_ADDR14_BRNTAKEN", Const, 0}, + {"R_PPC_ADDR14_BRTAKEN", Const, 0}, + {"R_PPC_ADDR16", Const, 0}, + {"R_PPC_ADDR16_HA", Const, 0}, + {"R_PPC_ADDR16_HI", Const, 0}, + {"R_PPC_ADDR16_LO", Const, 0}, + {"R_PPC_ADDR24", Const, 0}, + {"R_PPC_ADDR32", Const, 0}, + {"R_PPC_COPY", Const, 0}, + {"R_PPC_DTPMOD32", Const, 0}, + {"R_PPC_DTPREL16", Const, 0}, + {"R_PPC_DTPREL16_HA", Const, 0}, + {"R_PPC_DTPREL16_HI", Const, 0}, + {"R_PPC_DTPREL16_LO", Const, 0}, + {"R_PPC_DTPREL32", Const, 0}, + {"R_PPC_EMB_BIT_FLD", Const, 0}, + {"R_PPC_EMB_MRKREF", Const, 0}, + {"R_PPC_EMB_NADDR16", Const, 0}, + {"R_PPC_EMB_NADDR16_HA", Const, 0}, + {"R_PPC_EMB_NADDR16_HI", Const, 0}, + {"R_PPC_EMB_NADDR16_LO", Const, 0}, + {"R_PPC_EMB_NADDR32", Const, 0}, + {"R_PPC_EMB_RELSDA", Const, 0}, + {"R_PPC_EMB_RELSEC16", Const, 0}, + {"R_PPC_EMB_RELST_HA", Const, 0}, + {"R_PPC_EMB_RELST_HI", Const, 0}, + {"R_PPC_EMB_RELST_LO", Const, 0}, + {"R_PPC_EMB_SDA21", Const, 0}, + {"R_PPC_EMB_SDA2I16", Const, 0}, + {"R_PPC_EMB_SDA2REL", Const, 0}, + {"R_PPC_EMB_SDAI16", Const, 0}, + {"R_PPC_GLOB_DAT", Const, 0}, + {"R_PPC_GOT16", Const, 0}, + {"R_PPC_GOT16_HA", Const, 0}, + {"R_PPC_GOT16_HI", Const, 0}, + {"R_PPC_GOT16_LO", Const, 0}, + {"R_PPC_GOT_TLSGD16", Const, 0}, + {"R_PPC_GOT_TLSGD16_HA", Const, 0}, + {"R_PPC_GOT_TLSGD16_HI", Const, 0}, + {"R_PPC_GOT_TLSGD16_LO", Const, 0}, + {"R_PPC_GOT_TLSLD16", Const, 0}, + {"R_PPC_GOT_TLSLD16_HA", Const, 0}, + {"R_PPC_GOT_TLSLD16_HI", Const, 0}, + {"R_PPC_GOT_TLSLD16_LO", Const, 0}, + {"R_PPC_GOT_TPREL16", Const, 0}, + {"R_PPC_GOT_TPREL16_HA", Const, 0}, + {"R_PPC_GOT_TPREL16_HI", Const, 0}, + {"R_PPC_GOT_TPREL16_LO", Const, 0}, + {"R_PPC_JMP_SLOT", Const, 0}, + {"R_PPC_LOCAL24PC", Const, 0}, + {"R_PPC_NONE", Const, 0}, + {"R_PPC_PLT16_HA", Const, 0}, + {"R_PPC_PLT16_HI", Const, 0}, + {"R_PPC_PLT16_LO", Const, 0}, + {"R_PPC_PLT32", Const, 0}, + {"R_PPC_PLTREL24", Const, 0}, + {"R_PPC_PLTREL32", Const, 0}, + {"R_PPC_REL14", Const, 0}, + {"R_PPC_REL14_BRNTAKEN", Const, 0}, + {"R_PPC_REL14_BRTAKEN", Const, 0}, + {"R_PPC_REL24", Const, 0}, + {"R_PPC_REL32", Const, 0}, + {"R_PPC_RELATIVE", Const, 0}, + {"R_PPC_SDAREL16", Const, 0}, + {"R_PPC_SECTOFF", Const, 0}, + {"R_PPC_SECTOFF_HA", Const, 0}, + {"R_PPC_SECTOFF_HI", Const, 0}, + {"R_PPC_SECTOFF_LO", Const, 0}, + {"R_PPC_TLS", Const, 0}, + {"R_PPC_TPREL16", Const, 0}, + {"R_PPC_TPREL16_HA", Const, 0}, + {"R_PPC_TPREL16_HI", Const, 0}, + {"R_PPC_TPREL16_LO", Const, 0}, + {"R_PPC_TPREL32", Const, 0}, + {"R_PPC_UADDR16", Const, 0}, + {"R_PPC_UADDR32", Const, 0}, + {"R_RISCV", Type, 11}, + {"R_RISCV_32", Const, 11}, + {"R_RISCV_32_PCREL", Const, 12}, + {"R_RISCV_64", Const, 11}, + {"R_RISCV_ADD16", Const, 11}, + {"R_RISCV_ADD32", Const, 11}, + {"R_RISCV_ADD64", Const, 11}, + {"R_RISCV_ADD8", 
Const, 11}, + {"R_RISCV_ALIGN", Const, 11}, + {"R_RISCV_BRANCH", Const, 11}, + {"R_RISCV_CALL", Const, 11}, + {"R_RISCV_CALL_PLT", Const, 11}, + {"R_RISCV_COPY", Const, 11}, + {"R_RISCV_GNU_VTENTRY", Const, 11}, + {"R_RISCV_GNU_VTINHERIT", Const, 11}, + {"R_RISCV_GOT_HI20", Const, 11}, + {"R_RISCV_GPREL_I", Const, 11}, + {"R_RISCV_GPREL_S", Const, 11}, + {"R_RISCV_HI20", Const, 11}, + {"R_RISCV_JAL", Const, 11}, + {"R_RISCV_JUMP_SLOT", Const, 11}, + {"R_RISCV_LO12_I", Const, 11}, + {"R_RISCV_LO12_S", Const, 11}, + {"R_RISCV_NONE", Const, 11}, + {"R_RISCV_PCREL_HI20", Const, 11}, + {"R_RISCV_PCREL_LO12_I", Const, 11}, + {"R_RISCV_PCREL_LO12_S", Const, 11}, + {"R_RISCV_RELATIVE", Const, 11}, + {"R_RISCV_RELAX", Const, 11}, + {"R_RISCV_RVC_BRANCH", Const, 11}, + {"R_RISCV_RVC_JUMP", Const, 11}, + {"R_RISCV_RVC_LUI", Const, 11}, + {"R_RISCV_SET16", Const, 11}, + {"R_RISCV_SET32", Const, 11}, + {"R_RISCV_SET6", Const, 11}, + {"R_RISCV_SET8", Const, 11}, + {"R_RISCV_SUB16", Const, 11}, + {"R_RISCV_SUB32", Const, 11}, + {"R_RISCV_SUB6", Const, 11}, + {"R_RISCV_SUB64", Const, 11}, + {"R_RISCV_SUB8", Const, 11}, + {"R_RISCV_TLS_DTPMOD32", Const, 11}, + {"R_RISCV_TLS_DTPMOD64", Const, 11}, + {"R_RISCV_TLS_DTPREL32", Const, 11}, + {"R_RISCV_TLS_DTPREL64", Const, 11}, + {"R_RISCV_TLS_GD_HI20", Const, 11}, + {"R_RISCV_TLS_GOT_HI20", Const, 11}, + {"R_RISCV_TLS_TPREL32", Const, 11}, + {"R_RISCV_TLS_TPREL64", Const, 11}, + {"R_RISCV_TPREL_ADD", Const, 11}, + {"R_RISCV_TPREL_HI20", Const, 11}, + {"R_RISCV_TPREL_I", Const, 11}, + {"R_RISCV_TPREL_LO12_I", Const, 11}, + {"R_RISCV_TPREL_LO12_S", Const, 11}, + {"R_RISCV_TPREL_S", Const, 11}, + {"R_SPARC", Type, 0}, + {"R_SPARC_10", Const, 0}, + {"R_SPARC_11", Const, 0}, + {"R_SPARC_13", Const, 0}, + {"R_SPARC_16", Const, 0}, + {"R_SPARC_22", Const, 0}, + {"R_SPARC_32", Const, 0}, + {"R_SPARC_5", Const, 0}, + {"R_SPARC_6", Const, 0}, + {"R_SPARC_64", Const, 0}, + {"R_SPARC_7", Const, 0}, + {"R_SPARC_8", Const, 0}, + {"R_SPARC_COPY", Const, 0}, + {"R_SPARC_DISP16", Const, 0}, + {"R_SPARC_DISP32", Const, 0}, + {"R_SPARC_DISP64", Const, 0}, + {"R_SPARC_DISP8", Const, 0}, + {"R_SPARC_GLOB_DAT", Const, 0}, + {"R_SPARC_GLOB_JMP", Const, 0}, + {"R_SPARC_GOT10", Const, 0}, + {"R_SPARC_GOT13", Const, 0}, + {"R_SPARC_GOT22", Const, 0}, + {"R_SPARC_H44", Const, 0}, + {"R_SPARC_HH22", Const, 0}, + {"R_SPARC_HI22", Const, 0}, + {"R_SPARC_HIPLT22", Const, 0}, + {"R_SPARC_HIX22", Const, 0}, + {"R_SPARC_HM10", Const, 0}, + {"R_SPARC_JMP_SLOT", Const, 0}, + {"R_SPARC_L44", Const, 0}, + {"R_SPARC_LM22", Const, 0}, + {"R_SPARC_LO10", Const, 0}, + {"R_SPARC_LOPLT10", Const, 0}, + {"R_SPARC_LOX10", Const, 0}, + {"R_SPARC_M44", Const, 0}, + {"R_SPARC_NONE", Const, 0}, + {"R_SPARC_OLO10", Const, 0}, + {"R_SPARC_PC10", Const, 0}, + {"R_SPARC_PC22", Const, 0}, + {"R_SPARC_PCPLT10", Const, 0}, + {"R_SPARC_PCPLT22", Const, 0}, + {"R_SPARC_PCPLT32", Const, 0}, + {"R_SPARC_PC_HH22", Const, 0}, + {"R_SPARC_PC_HM10", Const, 0}, + {"R_SPARC_PC_LM22", Const, 0}, + {"R_SPARC_PLT32", Const, 0}, + {"R_SPARC_PLT64", Const, 0}, + {"R_SPARC_REGISTER", Const, 0}, + {"R_SPARC_RELATIVE", Const, 0}, + {"R_SPARC_UA16", Const, 0}, + {"R_SPARC_UA32", Const, 0}, + {"R_SPARC_UA64", Const, 0}, + {"R_SPARC_WDISP16", Const, 0}, + {"R_SPARC_WDISP19", Const, 0}, + {"R_SPARC_WDISP22", Const, 0}, + {"R_SPARC_WDISP30", Const, 0}, + {"R_SPARC_WPLT30", Const, 0}, + {"R_SYM32", Func, 0}, + {"R_SYM64", Func, 0}, + {"R_TYPE32", Func, 0}, + {"R_TYPE64", Func, 0}, + {"R_X86_64", Type, 0}, + {"R_X86_64_16", Const, 0}, + 
{"R_X86_64_32", Const, 0}, + {"R_X86_64_32S", Const, 0}, + {"R_X86_64_64", Const, 0}, + {"R_X86_64_8", Const, 0}, + {"R_X86_64_COPY", Const, 0}, + {"R_X86_64_DTPMOD64", Const, 0}, + {"R_X86_64_DTPOFF32", Const, 0}, + {"R_X86_64_DTPOFF64", Const, 0}, + {"R_X86_64_GLOB_DAT", Const, 0}, + {"R_X86_64_GOT32", Const, 0}, + {"R_X86_64_GOT64", Const, 10}, + {"R_X86_64_GOTOFF64", Const, 10}, + {"R_X86_64_GOTPC32", Const, 10}, + {"R_X86_64_GOTPC32_TLSDESC", Const, 10}, + {"R_X86_64_GOTPC64", Const, 10}, + {"R_X86_64_GOTPCREL", Const, 0}, + {"R_X86_64_GOTPCREL64", Const, 10}, + {"R_X86_64_GOTPCRELX", Const, 10}, + {"R_X86_64_GOTPLT64", Const, 10}, + {"R_X86_64_GOTTPOFF", Const, 0}, + {"R_X86_64_IRELATIVE", Const, 10}, + {"R_X86_64_JMP_SLOT", Const, 0}, + {"R_X86_64_NONE", Const, 0}, + {"R_X86_64_PC16", Const, 0}, + {"R_X86_64_PC32", Const, 0}, + {"R_X86_64_PC32_BND", Const, 10}, + {"R_X86_64_PC64", Const, 10}, + {"R_X86_64_PC8", Const, 0}, + {"R_X86_64_PLT32", Const, 0}, + {"R_X86_64_PLT32_BND", Const, 10}, + {"R_X86_64_PLTOFF64", Const, 10}, + {"R_X86_64_RELATIVE", Const, 0}, + {"R_X86_64_RELATIVE64", Const, 10}, + {"R_X86_64_REX_GOTPCRELX", Const, 10}, + {"R_X86_64_SIZE32", Const, 10}, + {"R_X86_64_SIZE64", Const, 10}, + {"R_X86_64_TLSDESC", Const, 10}, + {"R_X86_64_TLSDESC_CALL", Const, 10}, + {"R_X86_64_TLSGD", Const, 0}, + {"R_X86_64_TLSLD", Const, 0}, + {"R_X86_64_TPOFF32", Const, 0}, + {"R_X86_64_TPOFF64", Const, 0}, + {"Rel32", Type, 0}, + {"Rel32.Info", Field, 0}, + {"Rel32.Off", Field, 0}, + {"Rel64", Type, 0}, + {"Rel64.Info", Field, 0}, + {"Rel64.Off", Field, 0}, + {"Rela32", Type, 0}, + {"Rela32.Addend", Field, 0}, + {"Rela32.Info", Field, 0}, + {"Rela32.Off", Field, 0}, + {"Rela64", Type, 0}, + {"Rela64.Addend", Field, 0}, + {"Rela64.Info", Field, 0}, + {"Rela64.Off", Field, 0}, + {"SHF_ALLOC", Const, 0}, + {"SHF_COMPRESSED", Const, 6}, + {"SHF_EXECINSTR", Const, 0}, + {"SHF_GROUP", Const, 0}, + {"SHF_INFO_LINK", Const, 0}, + {"SHF_LINK_ORDER", Const, 0}, + {"SHF_MASKOS", Const, 0}, + {"SHF_MASKPROC", Const, 0}, + {"SHF_MERGE", Const, 0}, + {"SHF_OS_NONCONFORMING", Const, 0}, + {"SHF_STRINGS", Const, 0}, + {"SHF_TLS", Const, 0}, + {"SHF_WRITE", Const, 0}, + {"SHN_ABS", Const, 0}, + {"SHN_COMMON", Const, 0}, + {"SHN_HIOS", Const, 0}, + {"SHN_HIPROC", Const, 0}, + {"SHN_HIRESERVE", Const, 0}, + {"SHN_LOOS", Const, 0}, + {"SHN_LOPROC", Const, 0}, + {"SHN_LORESERVE", Const, 0}, + {"SHN_UNDEF", Const, 0}, + {"SHN_XINDEX", Const, 0}, + {"SHT_DYNAMIC", Const, 0}, + {"SHT_DYNSYM", Const, 0}, + {"SHT_FINI_ARRAY", Const, 0}, + {"SHT_GNU_ATTRIBUTES", Const, 0}, + {"SHT_GNU_HASH", Const, 0}, + {"SHT_GNU_LIBLIST", Const, 0}, + {"SHT_GNU_VERDEF", Const, 0}, + {"SHT_GNU_VERNEED", Const, 0}, + {"SHT_GNU_VERSYM", Const, 0}, + {"SHT_GROUP", Const, 0}, + {"SHT_HASH", Const, 0}, + {"SHT_HIOS", Const, 0}, + {"SHT_HIPROC", Const, 0}, + {"SHT_HIUSER", Const, 0}, + {"SHT_INIT_ARRAY", Const, 0}, + {"SHT_LOOS", Const, 0}, + {"SHT_LOPROC", Const, 0}, + {"SHT_LOUSER", Const, 0}, + {"SHT_MIPS_ABIFLAGS", Const, 17}, + {"SHT_NOBITS", Const, 0}, + {"SHT_NOTE", Const, 0}, + {"SHT_NULL", Const, 0}, + {"SHT_PREINIT_ARRAY", Const, 0}, + {"SHT_PROGBITS", Const, 0}, + {"SHT_REL", Const, 0}, + {"SHT_RELA", Const, 0}, + {"SHT_SHLIB", Const, 0}, + {"SHT_STRTAB", Const, 0}, + {"SHT_SYMTAB", Const, 0}, + {"SHT_SYMTAB_SHNDX", Const, 0}, + {"STB_GLOBAL", Const, 0}, + {"STB_HIOS", Const, 0}, + {"STB_HIPROC", Const, 0}, + {"STB_LOCAL", Const, 0}, + {"STB_LOOS", Const, 0}, + {"STB_LOPROC", Const, 0}, + {"STB_WEAK", Const, 0}, + 
{"STT_COMMON", Const, 0}, + {"STT_FILE", Const, 0}, + {"STT_FUNC", Const, 0}, + {"STT_HIOS", Const, 0}, + {"STT_HIPROC", Const, 0}, + {"STT_LOOS", Const, 0}, + {"STT_LOPROC", Const, 0}, + {"STT_NOTYPE", Const, 0}, + {"STT_OBJECT", Const, 0}, + {"STT_SECTION", Const, 0}, + {"STT_TLS", Const, 0}, + {"STV_DEFAULT", Const, 0}, + {"STV_HIDDEN", Const, 0}, + {"STV_INTERNAL", Const, 0}, + {"STV_PROTECTED", Const, 0}, + {"ST_BIND", Func, 0}, + {"ST_INFO", Func, 0}, + {"ST_TYPE", Func, 0}, + {"ST_VISIBILITY", Func, 0}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Addralign", Field, 0}, + {"Section32.Entsize", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Info", Field, 0}, + {"Section32.Link", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Off", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section32.Type", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Addralign", Field, 0}, + {"Section64.Entsize", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Info", Field, 0}, + {"Section64.Link", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Off", Field, 0}, + {"Section64.Size", Field, 0}, + {"Section64.Type", Field, 0}, + {"SectionFlag", Type, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Addralign", Field, 0}, + {"SectionHeader.Entsize", Field, 0}, + {"SectionHeader.FileSize", Field, 6}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Info", Field, 0}, + {"SectionHeader.Link", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.Type", Field, 0}, + {"SectionIndex", Type, 0}, + {"SectionType", Type, 0}, + {"Sym32", Type, 0}, + {"Sym32.Info", Field, 0}, + {"Sym32.Name", Field, 0}, + {"Sym32.Other", Field, 0}, + {"Sym32.Shndx", Field, 0}, + {"Sym32.Size", Field, 0}, + {"Sym32.Value", Field, 0}, + {"Sym32Size", Const, 0}, + {"Sym64", Type, 0}, + {"Sym64.Info", Field, 0}, + {"Sym64.Name", Field, 0}, + {"Sym64.Other", Field, 0}, + {"Sym64.Shndx", Field, 0}, + {"Sym64.Size", Field, 0}, + {"Sym64.Value", Field, 0}, + {"Sym64Size", Const, 0}, + {"SymBind", Type, 0}, + {"SymType", Type, 0}, + {"SymVis", Type, 0}, + {"Symbol", Type, 0}, + {"Symbol.Info", Field, 0}, + {"Symbol.Library", Field, 13}, + {"Symbol.Name", Field, 0}, + {"Symbol.Other", Field, 0}, + {"Symbol.Section", Field, 0}, + {"Symbol.Size", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symbol.Version", Field, 13}, + {"Type", Type, 0}, + {"Version", Type, 0}, + }, + "debug/gosym": { + {"(*DecodingError).Error", Method, 0}, + {"(*LineTable).LineToPC", Method, 0}, + {"(*LineTable).PCToLine", Method, 0}, + {"(*Sym).BaseName", Method, 0}, + {"(*Sym).PackageName", Method, 0}, + {"(*Sym).ReceiverName", Method, 0}, + {"(*Sym).Static", Method, 0}, + {"(*Table).LineToPC", Method, 0}, + {"(*Table).LookupFunc", Method, 0}, + {"(*Table).LookupSym", Method, 0}, + {"(*Table).PCToFunc", Method, 0}, + {"(*Table).PCToLine", Method, 0}, + {"(*Table).SymByAddr", Method, 0}, + {"(*UnknownLineError).Error", Method, 0}, + {"(Func).BaseName", Method, 0}, + {"(Func).PackageName", Method, 0}, + {"(Func).ReceiverName", Method, 0}, + {"(Func).Static", Method, 0}, + {"(UnknownFileError).Error", Method, 0}, + {"DecodingError", Type, 0}, + {"Func", Type, 0}, + {"Func.End", Field, 0}, + {"Func.Entry", Field, 0}, + {"Func.FrameSize", Field, 0}, + 
{"Func.LineTable", Field, 0}, + {"Func.Locals", Field, 0}, + {"Func.Obj", Field, 0}, + {"Func.Params", Field, 0}, + {"Func.Sym", Field, 0}, + {"LineTable", Type, 0}, + {"LineTable.Data", Field, 0}, + {"LineTable.Line", Field, 0}, + {"LineTable.PC", Field, 0}, + {"NewLineTable", Func, 0}, + {"NewTable", Func, 0}, + {"Obj", Type, 0}, + {"Obj.Funcs", Field, 0}, + {"Obj.Paths", Field, 0}, + {"Sym", Type, 0}, + {"Sym.Func", Field, 0}, + {"Sym.GoType", Field, 0}, + {"Sym.Name", Field, 0}, + {"Sym.Type", Field, 0}, + {"Sym.Value", Field, 0}, + {"Table", Type, 0}, + {"Table.Files", Field, 0}, + {"Table.Funcs", Field, 0}, + {"Table.Objs", Field, 0}, + {"Table.Syms", Field, 0}, + {"UnknownFileError", Type, 0}, + {"UnknownLineError", Type, 0}, + {"UnknownLineError.File", Field, 0}, + {"UnknownLineError.Line", Field, 0}, + }, + "debug/macho": { + {"(*FatFile).Close", Method, 3}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).Segment", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(*Segment).Data", Method, 0}, + {"(*Segment).Open", Method, 0}, + {"(Cpu).GoString", Method, 0}, + {"(Cpu).String", Method, 0}, + {"(Dylib).Raw", Method, 0}, + {"(Dysymtab).Raw", Method, 0}, + {"(FatArch).Close", Method, 3}, + {"(FatArch).DWARF", Method, 3}, + {"(FatArch).ImportedLibraries", Method, 3}, + {"(FatArch).ImportedSymbols", Method, 3}, + {"(FatArch).Section", Method, 3}, + {"(FatArch).Segment", Method, 3}, + {"(LoadBytes).Raw", Method, 0}, + {"(LoadCmd).GoString", Method, 0}, + {"(LoadCmd).String", Method, 0}, + {"(RelocTypeARM).GoString", Method, 10}, + {"(RelocTypeARM).String", Method, 10}, + {"(RelocTypeARM64).GoString", Method, 10}, + {"(RelocTypeARM64).String", Method, 10}, + {"(RelocTypeGeneric).GoString", Method, 10}, + {"(RelocTypeGeneric).String", Method, 10}, + {"(RelocTypeX86_64).GoString", Method, 10}, + {"(RelocTypeX86_64).String", Method, 10}, + {"(Rpath).Raw", Method, 10}, + {"(Section).ReadAt", Method, 0}, + {"(Segment).Raw", Method, 0}, + {"(Segment).ReadAt", Method, 0}, + {"(Symtab).Raw", Method, 0}, + {"(Type).GoString", Method, 10}, + {"(Type).String", Method, 10}, + {"ARM64_RELOC_ADDEND", Const, 10}, + {"ARM64_RELOC_BRANCH26", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_PAGE21", Const, 10}, + {"ARM64_RELOC_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_POINTER_TO_GOT", Const, 10}, + {"ARM64_RELOC_SUBTRACTOR", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_UNSIGNED", Const, 10}, + {"ARM_RELOC_BR24", Const, 10}, + {"ARM_RELOC_HALF", Const, 10}, + {"ARM_RELOC_HALF_SECTDIFF", Const, 10}, + {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"ARM_RELOC_PAIR", Const, 10}, + {"ARM_RELOC_PB_LA_PTR", Const, 10}, + {"ARM_RELOC_SECTDIFF", Const, 10}, + {"ARM_RELOC_VANILLA", Const, 10}, + {"ARM_THUMB_32BIT_BRANCH", Const, 10}, + {"ARM_THUMB_RELOC_BR22", Const, 10}, + {"Cpu", Type, 0}, + {"Cpu386", Const, 0}, + {"CpuAmd64", Const, 0}, + {"CpuArm", Const, 3}, + {"CpuArm64", Const, 11}, + {"CpuPpc", Const, 3}, + {"CpuPpc64", Const, 3}, + {"Dylib", Type, 0}, + {"Dylib.CompatVersion", Field, 0}, + {"Dylib.CurrentVersion", Field, 0}, + {"Dylib.LoadBytes", Field, 0}, + {"Dylib.Name", Field, 0}, + {"Dylib.Time", Field, 0}, + {"DylibCmd", Type, 0}, + 
{"DylibCmd.Cmd", Field, 0}, + {"DylibCmd.CompatVersion", Field, 0}, + {"DylibCmd.CurrentVersion", Field, 0}, + {"DylibCmd.Len", Field, 0}, + {"DylibCmd.Name", Field, 0}, + {"DylibCmd.Time", Field, 0}, + {"Dysymtab", Type, 0}, + {"Dysymtab.DysymtabCmd", Field, 0}, + {"Dysymtab.IndirectSyms", Field, 0}, + {"Dysymtab.LoadBytes", Field, 0}, + {"DysymtabCmd", Type, 0}, + {"DysymtabCmd.Cmd", Field, 0}, + {"DysymtabCmd.Extrefsymoff", Field, 0}, + {"DysymtabCmd.Extreloff", Field, 0}, + {"DysymtabCmd.Iextdefsym", Field, 0}, + {"DysymtabCmd.Ilocalsym", Field, 0}, + {"DysymtabCmd.Indirectsymoff", Field, 0}, + {"DysymtabCmd.Iundefsym", Field, 0}, + {"DysymtabCmd.Len", Field, 0}, + {"DysymtabCmd.Locreloff", Field, 0}, + {"DysymtabCmd.Modtaboff", Field, 0}, + {"DysymtabCmd.Nextdefsym", Field, 0}, + {"DysymtabCmd.Nextrefsyms", Field, 0}, + {"DysymtabCmd.Nextrel", Field, 0}, + {"DysymtabCmd.Nindirectsyms", Field, 0}, + {"DysymtabCmd.Nlocalsym", Field, 0}, + {"DysymtabCmd.Nlocrel", Field, 0}, + {"DysymtabCmd.Nmodtab", Field, 0}, + {"DysymtabCmd.Ntoc", Field, 0}, + {"DysymtabCmd.Nundefsym", Field, 0}, + {"DysymtabCmd.Tocoffset", Field, 0}, + {"ErrNotFat", Var, 3}, + {"FatArch", Type, 3}, + {"FatArch.FatArchHeader", Field, 3}, + {"FatArch.File", Field, 3}, + {"FatArchHeader", Type, 3}, + {"FatArchHeader.Align", Field, 3}, + {"FatArchHeader.Cpu", Field, 3}, + {"FatArchHeader.Offset", Field, 3}, + {"FatArchHeader.Size", Field, 3}, + {"FatArchHeader.SubCpu", Field, 3}, + {"FatFile", Type, 3}, + {"FatFile.Arches", Field, 3}, + {"FatFile.Magic", Field, 3}, + {"File", Type, 0}, + {"File.ByteOrder", Field, 0}, + {"File.Dysymtab", Field, 0}, + {"File.FileHeader", Field, 0}, + {"File.Loads", Field, 0}, + {"File.Sections", Field, 0}, + {"File.Symtab", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Cmdsz", Field, 0}, + {"FileHeader.Cpu", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Magic", Field, 0}, + {"FileHeader.Ncmd", Field, 0}, + {"FileHeader.SubCpu", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FlagAllModsBound", Const, 10}, + {"FlagAllowStackExecution", Const, 10}, + {"FlagAppExtensionSafe", Const, 10}, + {"FlagBindAtLoad", Const, 10}, + {"FlagBindsToWeak", Const, 10}, + {"FlagCanonical", Const, 10}, + {"FlagDeadStrippableDylib", Const, 10}, + {"FlagDyldLink", Const, 10}, + {"FlagForceFlat", Const, 10}, + {"FlagHasTLVDescriptors", Const, 10}, + {"FlagIncrLink", Const, 10}, + {"FlagLazyInit", Const, 10}, + {"FlagNoFixPrebinding", Const, 10}, + {"FlagNoHeapExecution", Const, 10}, + {"FlagNoMultiDefs", Const, 10}, + {"FlagNoReexportedDylibs", Const, 10}, + {"FlagNoUndefs", Const, 10}, + {"FlagPIE", Const, 10}, + {"FlagPrebindable", Const, 10}, + {"FlagPrebound", Const, 10}, + {"FlagRootSafe", Const, 10}, + {"FlagSetuidSafe", Const, 10}, + {"FlagSplitSegs", Const, 10}, + {"FlagSubsectionsViaSymbols", Const, 10}, + {"FlagTwoLevel", Const, 10}, + {"FlagWeakDefines", Const, 10}, + {"FormatError", Type, 0}, + {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_PAIR", Const, 10}, + {"GENERIC_RELOC_PB_LA_PTR", Const, 10}, + {"GENERIC_RELOC_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_TLV", Const, 10}, + {"GENERIC_RELOC_VANILLA", Const, 10}, + {"Load", Type, 0}, + {"LoadBytes", Type, 0}, + {"LoadCmd", Type, 0}, + {"LoadCmdDylib", Const, 0}, + {"LoadCmdDylinker", Const, 0}, + {"LoadCmdDysymtab", Const, 0}, + {"LoadCmdRpath", Const, 10}, + {"LoadCmdSegment", Const, 0}, + {"LoadCmdSegment64", Const, 0}, + {"LoadCmdSymtab", Const, 0}, + {"LoadCmdThread", Const, 0}, + {"LoadCmdUnixThread", Const, 
0}, + {"Magic32", Const, 0}, + {"Magic64", Const, 0}, + {"MagicFat", Const, 3}, + {"NewFatFile", Func, 3}, + {"NewFile", Func, 0}, + {"Nlist32", Type, 0}, + {"Nlist32.Desc", Field, 0}, + {"Nlist32.Name", Field, 0}, + {"Nlist32.Sect", Field, 0}, + {"Nlist32.Type", Field, 0}, + {"Nlist32.Value", Field, 0}, + {"Nlist64", Type, 0}, + {"Nlist64.Desc", Field, 0}, + {"Nlist64.Name", Field, 0}, + {"Nlist64.Sect", Field, 0}, + {"Nlist64.Type", Field, 0}, + {"Nlist64.Value", Field, 0}, + {"Open", Func, 0}, + {"OpenFat", Func, 3}, + {"Regs386", Type, 0}, + {"Regs386.AX", Field, 0}, + {"Regs386.BP", Field, 0}, + {"Regs386.BX", Field, 0}, + {"Regs386.CS", Field, 0}, + {"Regs386.CX", Field, 0}, + {"Regs386.DI", Field, 0}, + {"Regs386.DS", Field, 0}, + {"Regs386.DX", Field, 0}, + {"Regs386.ES", Field, 0}, + {"Regs386.FLAGS", Field, 0}, + {"Regs386.FS", Field, 0}, + {"Regs386.GS", Field, 0}, + {"Regs386.IP", Field, 0}, + {"Regs386.SI", Field, 0}, + {"Regs386.SP", Field, 0}, + {"Regs386.SS", Field, 0}, + {"RegsAMD64", Type, 0}, + {"RegsAMD64.AX", Field, 0}, + {"RegsAMD64.BP", Field, 0}, + {"RegsAMD64.BX", Field, 0}, + {"RegsAMD64.CS", Field, 0}, + {"RegsAMD64.CX", Field, 0}, + {"RegsAMD64.DI", Field, 0}, + {"RegsAMD64.DX", Field, 0}, + {"RegsAMD64.FLAGS", Field, 0}, + {"RegsAMD64.FS", Field, 0}, + {"RegsAMD64.GS", Field, 0}, + {"RegsAMD64.IP", Field, 0}, + {"RegsAMD64.R10", Field, 0}, + {"RegsAMD64.R11", Field, 0}, + {"RegsAMD64.R12", Field, 0}, + {"RegsAMD64.R13", Field, 0}, + {"RegsAMD64.R14", Field, 0}, + {"RegsAMD64.R15", Field, 0}, + {"RegsAMD64.R8", Field, 0}, + {"RegsAMD64.R9", Field, 0}, + {"RegsAMD64.SI", Field, 0}, + {"RegsAMD64.SP", Field, 0}, + {"Reloc", Type, 10}, + {"Reloc.Addr", Field, 10}, + {"Reloc.Extern", Field, 10}, + {"Reloc.Len", Field, 10}, + {"Reloc.Pcrel", Field, 10}, + {"Reloc.Scattered", Field, 10}, + {"Reloc.Type", Field, 10}, + {"Reloc.Value", Field, 10}, + {"RelocTypeARM", Type, 10}, + {"RelocTypeARM64", Type, 10}, + {"RelocTypeGeneric", Type, 10}, + {"RelocTypeX86_64", Type, 10}, + {"Rpath", Type, 10}, + {"Rpath.LoadBytes", Field, 10}, + {"Rpath.Path", Field, 10}, + {"RpathCmd", Type, 10}, + {"RpathCmd.Cmd", Field, 10}, + {"RpathCmd.Len", Field, 10}, + {"RpathCmd.Path", Field, 10}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 10}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Align", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Nreloc", Field, 0}, + {"Section32.Offset", Field, 0}, + {"Section32.Reloff", Field, 0}, + {"Section32.Reserve1", Field, 0}, + {"Section32.Reserve2", Field, 0}, + {"Section32.Seg", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Align", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Nreloc", Field, 0}, + {"Section64.Offset", Field, 0}, + {"Section64.Reloff", Field, 0}, + {"Section64.Reserve1", Field, 0}, + {"Section64.Reserve2", Field, 0}, + {"Section64.Reserve3", Field, 0}, + {"Section64.Seg", Field, 0}, + {"Section64.Size", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Align", Field, 0}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Nreloc", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Reloff", Field, 0}, + {"SectionHeader.Seg", Field, 0}, + {"SectionHeader.Size", Field, 0}, + 
{"Segment", Type, 0}, + {"Segment.LoadBytes", Field, 0}, + {"Segment.ReaderAt", Field, 0}, + {"Segment.SegmentHeader", Field, 0}, + {"Segment32", Type, 0}, + {"Segment32.Addr", Field, 0}, + {"Segment32.Cmd", Field, 0}, + {"Segment32.Filesz", Field, 0}, + {"Segment32.Flag", Field, 0}, + {"Segment32.Len", Field, 0}, + {"Segment32.Maxprot", Field, 0}, + {"Segment32.Memsz", Field, 0}, + {"Segment32.Name", Field, 0}, + {"Segment32.Nsect", Field, 0}, + {"Segment32.Offset", Field, 0}, + {"Segment32.Prot", Field, 0}, + {"Segment64", Type, 0}, + {"Segment64.Addr", Field, 0}, + {"Segment64.Cmd", Field, 0}, + {"Segment64.Filesz", Field, 0}, + {"Segment64.Flag", Field, 0}, + {"Segment64.Len", Field, 0}, + {"Segment64.Maxprot", Field, 0}, + {"Segment64.Memsz", Field, 0}, + {"Segment64.Name", Field, 0}, + {"Segment64.Nsect", Field, 0}, + {"Segment64.Offset", Field, 0}, + {"Segment64.Prot", Field, 0}, + {"SegmentHeader", Type, 0}, + {"SegmentHeader.Addr", Field, 0}, + {"SegmentHeader.Cmd", Field, 0}, + {"SegmentHeader.Filesz", Field, 0}, + {"SegmentHeader.Flag", Field, 0}, + {"SegmentHeader.Len", Field, 0}, + {"SegmentHeader.Maxprot", Field, 0}, + {"SegmentHeader.Memsz", Field, 0}, + {"SegmentHeader.Name", Field, 0}, + {"SegmentHeader.Nsect", Field, 0}, + {"SegmentHeader.Offset", Field, 0}, + {"SegmentHeader.Prot", Field, 0}, + {"Symbol", Type, 0}, + {"Symbol.Desc", Field, 0}, + {"Symbol.Name", Field, 0}, + {"Symbol.Sect", Field, 0}, + {"Symbol.Type", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symtab", Type, 0}, + {"Symtab.LoadBytes", Field, 0}, + {"Symtab.Syms", Field, 0}, + {"Symtab.SymtabCmd", Field, 0}, + {"SymtabCmd", Type, 0}, + {"SymtabCmd.Cmd", Field, 0}, + {"SymtabCmd.Len", Field, 0}, + {"SymtabCmd.Nsyms", Field, 0}, + {"SymtabCmd.Stroff", Field, 0}, + {"SymtabCmd.Strsize", Field, 0}, + {"SymtabCmd.Symoff", Field, 0}, + {"Thread", Type, 0}, + {"Thread.Cmd", Field, 0}, + {"Thread.Data", Field, 0}, + {"Thread.Len", Field, 0}, + {"Thread.Type", Field, 0}, + {"Type", Type, 0}, + {"TypeBundle", Const, 3}, + {"TypeDylib", Const, 3}, + {"TypeExec", Const, 0}, + {"TypeObj", Const, 0}, + {"X86_64_RELOC_BRANCH", Const, 10}, + {"X86_64_RELOC_GOT", Const, 10}, + {"X86_64_RELOC_GOT_LOAD", Const, 10}, + {"X86_64_RELOC_SIGNED", Const, 10}, + {"X86_64_RELOC_SIGNED_1", Const, 10}, + {"X86_64_RELOC_SIGNED_2", Const, 10}, + {"X86_64_RELOC_SIGNED_4", Const, 10}, + {"X86_64_RELOC_SUBTRACTOR", Const, 10}, + {"X86_64_RELOC_TLV", Const, 10}, + {"X86_64_RELOC_UNSIGNED", Const, 10}, + }, + "debug/pe": { + {"(*COFFSymbol).FullName", Method, 8}, + {"(*File).COFFSymbolReadSectionDefAux", Method, 19}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Section).ReadAt", Method, 0}, + {"(StringTable).String", Method, 8}, + {"COFFSymbol", Type, 1}, + {"COFFSymbol.Name", Field, 1}, + {"COFFSymbol.NumberOfAuxSymbols", Field, 1}, + {"COFFSymbol.SectionNumber", Field, 1}, + {"COFFSymbol.StorageClass", Field, 1}, + {"COFFSymbol.Type", Field, 1}, + {"COFFSymbol.Value", Field, 1}, + {"COFFSymbolAuxFormat5", Type, 19}, + {"COFFSymbolAuxFormat5.Checksum", Field, 19}, + {"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19}, + {"COFFSymbolAuxFormat5.NumRelocs", Field, 19}, + {"COFFSymbolAuxFormat5.SecNum", Field, 19}, + {"COFFSymbolAuxFormat5.Selection", Field, 19}, + {"COFFSymbolAuxFormat5.Size", 
Field, 19}, + {"COFFSymbolSize", Const, 1}, + {"DataDirectory", Type, 3}, + {"DataDirectory.Size", Field, 3}, + {"DataDirectory.VirtualAddress", Field, 3}, + {"File", Type, 0}, + {"File.COFFSymbols", Field, 8}, + {"File.FileHeader", Field, 0}, + {"File.OptionalHeader", Field, 3}, + {"File.Sections", Field, 0}, + {"File.StringTable", Field, 8}, + {"File.Symbols", Field, 1}, + {"FileHeader", Type, 0}, + {"FileHeader.Characteristics", Field, 0}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.NumberOfSections", Field, 0}, + {"FileHeader.NumberOfSymbols", Field, 0}, + {"FileHeader.PointerToSymbolTable", Field, 0}, + {"FileHeader.SizeOfOptionalHeader", Field, 0}, + {"FileHeader.TimeDateStamp", Field, 0}, + {"FormatError", Type, 0}, + {"IMAGE_COMDAT_SELECT_ANY", Const, 19}, + {"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19}, + {"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19}, + {"IMAGE_COMDAT_SELECT_LARGEST", Const, 19}, + {"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19}, + {"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19}, + {"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11}, + {"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11}, + {"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15}, + {"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15}, + {"IMAGE_FILE_32BIT_MACHINE", Const, 15}, + {"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15}, + {"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15}, + {"IMAGE_FILE_DEBUG_STRIPPED", Const, 15}, + {"IMAGE_FILE_DLL", Const, 15}, + {"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15}, + {"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15}, + {"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15}, + {"IMAGE_FILE_MACHINE_AM33", Const, 0}, + {"IMAGE_FILE_MACHINE_AMD64", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM", Const, 0}, + {"IMAGE_FILE_MACHINE_ARM64", Const, 11}, + {"IMAGE_FILE_MACHINE_ARMNT", Const, 12}, + {"IMAGE_FILE_MACHINE_EBC", Const, 0}, + {"IMAGE_FILE_MACHINE_I386", Const, 0}, + {"IMAGE_FILE_MACHINE_IA64", Const, 0}, + {"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19}, + {"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19}, + {"IMAGE_FILE_MACHINE_M32R", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPS16", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0}, + {"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPC", Const, 0}, + {"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0}, + 
{"IMAGE_FILE_MACHINE_R4000", Const, 0}, + {"IMAGE_FILE_MACHINE_RISCV128", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV32", Const, 20}, + {"IMAGE_FILE_MACHINE_RISCV64", Const, 20}, + {"IMAGE_FILE_MACHINE_SH3", Const, 0}, + {"IMAGE_FILE_MACHINE_SH3DSP", Const, 0}, + {"IMAGE_FILE_MACHINE_SH4", Const, 0}, + {"IMAGE_FILE_MACHINE_SH5", Const, 0}, + {"IMAGE_FILE_MACHINE_THUMB", Const, 0}, + {"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0}, + {"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0}, + {"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_RELOCS_STRIPPED", Const, 15}, + {"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15}, + {"IMAGE_FILE_SYSTEM", Const, 15}, + {"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15}, + {"IMAGE_SCN_CNT_CODE", Const, 19}, + {"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19}, + {"IMAGE_SCN_LNK_COMDAT", Const, 19}, + {"IMAGE_SCN_MEM_DISCARDABLE", Const, 19}, + {"IMAGE_SCN_MEM_EXECUTE", Const, 19}, + {"IMAGE_SCN_MEM_READ", Const, 19}, + {"IMAGE_SCN_MEM_WRITE", Const, 19}, + {"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15}, + {"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE", Const, 15}, + {"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15}, + {"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15}, + {"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15}, + {"IMAGE_SUBSYSTEM_XBOX", Const, 15}, + {"ImportDirectory", Type, 0}, + {"ImportDirectory.FirstThunk", Field, 0}, + {"ImportDirectory.ForwarderChain", Field, 0}, + {"ImportDirectory.Name", Field, 0}, + {"ImportDirectory.OriginalFirstThunk", Field, 0}, + {"ImportDirectory.TimeDateStamp", Field, 0}, + {"NewFile", Func, 0}, + {"Open", Func, 0}, + {"OptionalHeader32", Type, 3}, + {"OptionalHeader32.AddressOfEntryPoint", Field, 3}, + {"OptionalHeader32.BaseOfCode", Field, 3}, + {"OptionalHeader32.BaseOfData", Field, 3}, + {"OptionalHeader32.CheckSum", Field, 3}, + {"OptionalHeader32.DataDirectory", Field, 3}, + {"OptionalHeader32.DllCharacteristics", Field, 3}, + {"OptionalHeader32.FileAlignment", Field, 3}, + {"OptionalHeader32.ImageBase", Field, 3}, + {"OptionalHeader32.LoaderFlags", Field, 3}, + {"OptionalHeader32.Magic", Field, 3}, + {"OptionalHeader32.MajorImageVersion", Field, 3}, + {"OptionalHeader32.MajorLinkerVersion", Field, 3}, + {"OptionalHeader32.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader32.MinorImageVersion", Field, 3}, + {"OptionalHeader32.MinorLinkerVersion", Field, 3}, + {"OptionalHeader32.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader32.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader32.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader32.SectionAlignment", Field, 3}, + {"OptionalHeader32.SizeOfCode", Field, 3}, + {"OptionalHeader32.SizeOfHeaders", Field, 3}, + {"OptionalHeader32.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader32.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader32.SizeOfImage", Field, 3}, + {"OptionalHeader32.SizeOfInitializedData", Field, 3}, + {"OptionalHeader32.SizeOfStackCommit", Field, 3}, + {"OptionalHeader32.SizeOfStackReserve", Field, 3}, + {"OptionalHeader32.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader32.Subsystem", Field, 3}, + 
{"OptionalHeader32.Win32VersionValue", Field, 3}, + {"OptionalHeader64", Type, 3}, + {"OptionalHeader64.AddressOfEntryPoint", Field, 3}, + {"OptionalHeader64.BaseOfCode", Field, 3}, + {"OptionalHeader64.CheckSum", Field, 3}, + {"OptionalHeader64.DataDirectory", Field, 3}, + {"OptionalHeader64.DllCharacteristics", Field, 3}, + {"OptionalHeader64.FileAlignment", Field, 3}, + {"OptionalHeader64.ImageBase", Field, 3}, + {"OptionalHeader64.LoaderFlags", Field, 3}, + {"OptionalHeader64.Magic", Field, 3}, + {"OptionalHeader64.MajorImageVersion", Field, 3}, + {"OptionalHeader64.MajorLinkerVersion", Field, 3}, + {"OptionalHeader64.MajorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MajorSubsystemVersion", Field, 3}, + {"OptionalHeader64.MinorImageVersion", Field, 3}, + {"OptionalHeader64.MinorLinkerVersion", Field, 3}, + {"OptionalHeader64.MinorOperatingSystemVersion", Field, 3}, + {"OptionalHeader64.MinorSubsystemVersion", Field, 3}, + {"OptionalHeader64.NumberOfRvaAndSizes", Field, 3}, + {"OptionalHeader64.SectionAlignment", Field, 3}, + {"OptionalHeader64.SizeOfCode", Field, 3}, + {"OptionalHeader64.SizeOfHeaders", Field, 3}, + {"OptionalHeader64.SizeOfHeapCommit", Field, 3}, + {"OptionalHeader64.SizeOfHeapReserve", Field, 3}, + {"OptionalHeader64.SizeOfImage", Field, 3}, + {"OptionalHeader64.SizeOfInitializedData", Field, 3}, + {"OptionalHeader64.SizeOfStackCommit", Field, 3}, + {"OptionalHeader64.SizeOfStackReserve", Field, 3}, + {"OptionalHeader64.SizeOfUninitializedData", Field, 3}, + {"OptionalHeader64.Subsystem", Field, 3}, + {"OptionalHeader64.Win32VersionValue", Field, 3}, + {"Reloc", Type, 8}, + {"Reloc.SymbolTableIndex", Field, 8}, + {"Reloc.Type", Field, 8}, + {"Reloc.VirtualAddress", Field, 8}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.Relocs", Field, 8}, + {"Section.SectionHeader", Field, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Characteristics", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.NumberOfLineNumbers", Field, 0}, + {"SectionHeader.NumberOfRelocations", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.PointerToLineNumbers", Field, 0}, + {"SectionHeader.PointerToRelocations", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.VirtualAddress", Field, 0}, + {"SectionHeader.VirtualSize", Field, 0}, + {"SectionHeader32", Type, 0}, + {"SectionHeader32.Characteristics", Field, 0}, + {"SectionHeader32.Name", Field, 0}, + {"SectionHeader32.NumberOfLineNumbers", Field, 0}, + {"SectionHeader32.NumberOfRelocations", Field, 0}, + {"SectionHeader32.PointerToLineNumbers", Field, 0}, + {"SectionHeader32.PointerToRawData", Field, 0}, + {"SectionHeader32.PointerToRelocations", Field, 0}, + {"SectionHeader32.SizeOfRawData", Field, 0}, + {"SectionHeader32.VirtualAddress", Field, 0}, + {"SectionHeader32.VirtualSize", Field, 0}, + {"StringTable", Type, 8}, + {"Symbol", Type, 1}, + {"Symbol.Name", Field, 1}, + {"Symbol.SectionNumber", Field, 1}, + {"Symbol.StorageClass", Field, 1}, + {"Symbol.Type", Field, 1}, + {"Symbol.Value", Field, 1}, + }, + "debug/plan9obj": { + {"(*File).Close", Method, 3}, + {"(*File).Section", Method, 3}, + {"(*File).Symbols", Method, 3}, + {"(*Section).Data", Method, 3}, + {"(*Section).Open", Method, 3}, + {"(Section).ReadAt", Method, 3}, + {"ErrNoSymbols", Var, 18}, + {"File", Type, 3}, + {"File.FileHeader", Field, 3}, + {"File.Sections", Field, 3}, + {"FileHeader", Type, 3}, + {"FileHeader.Bss", Field, 3}, + {"FileHeader.Entry", Field, 3}, + {"FileHeader.HdrSize", 
Field, 4}, + {"FileHeader.LoadAddress", Field, 4}, + {"FileHeader.Magic", Field, 3}, + {"FileHeader.PtrSize", Field, 3}, + {"Magic386", Const, 3}, + {"Magic64", Const, 3}, + {"MagicAMD64", Const, 3}, + {"MagicARM", Const, 3}, + {"NewFile", Func, 3}, + {"Open", Func, 3}, + {"Section", Type, 3}, + {"Section.ReaderAt", Field, 3}, + {"Section.SectionHeader", Field, 3}, + {"SectionHeader", Type, 3}, + {"SectionHeader.Name", Field, 3}, + {"SectionHeader.Offset", Field, 3}, + {"SectionHeader.Size", Field, 3}, + {"Sym", Type, 3}, + {"Sym.Name", Field, 3}, + {"Sym.Type", Field, 3}, + {"Sym.Value", Field, 3}, + }, + "embed": { + {"(FS).Open", Method, 16}, + {"(FS).ReadDir", Method, 16}, + {"(FS).ReadFile", Method, 16}, + {"FS", Type, 16}, + }, + "encoding": { + {"BinaryMarshaler", Type, 2}, + {"BinaryUnmarshaler", Type, 2}, + {"TextMarshaler", Type, 2}, + {"TextUnmarshaler", Type, 2}, + }, + "encoding/ascii85": { + {"(CorruptInputError).Error", Method, 0}, + {"CorruptInputError", Type, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"MaxEncodedLen", Func, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + }, + "encoding/asn1": { + {"(BitString).At", Method, 0}, + {"(BitString).RightAlign", Method, 0}, + {"(ObjectIdentifier).Equal", Method, 0}, + {"(ObjectIdentifier).String", Method, 3}, + {"(StructuralError).Error", Method, 0}, + {"(SyntaxError).Error", Method, 0}, + {"BitString", Type, 0}, + {"BitString.BitLength", Field, 0}, + {"BitString.Bytes", Field, 0}, + {"ClassApplication", Const, 6}, + {"ClassContextSpecific", Const, 6}, + {"ClassPrivate", Const, 6}, + {"ClassUniversal", Const, 6}, + {"Enumerated", Type, 0}, + {"Flag", Type, 0}, + {"Marshal", Func, 0}, + {"MarshalWithParams", Func, 10}, + {"NullBytes", Var, 9}, + {"NullRawValue", Var, 9}, + {"ObjectIdentifier", Type, 0}, + {"RawContent", Type, 0}, + {"RawValue", Type, 0}, + {"RawValue.Bytes", Field, 0}, + {"RawValue.Class", Field, 0}, + {"RawValue.FullBytes", Field, 0}, + {"RawValue.IsCompound", Field, 0}, + {"RawValue.Tag", Field, 0}, + {"StructuralError", Type, 0}, + {"StructuralError.Msg", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagBMPString", Const, 14}, + {"TagBitString", Const, 6}, + {"TagBoolean", Const, 6}, + {"TagEnum", Const, 6}, + {"TagGeneralString", Const, 6}, + {"TagGeneralizedTime", Const, 6}, + {"TagIA5String", Const, 6}, + {"TagInteger", Const, 6}, + {"TagNull", Const, 9}, + {"TagNumericString", Const, 10}, + {"TagOID", Const, 6}, + {"TagOctetString", Const, 6}, + {"TagPrintableString", Const, 6}, + {"TagSequence", Const, 6}, + {"TagSet", Const, 6}, + {"TagT61String", Const, 6}, + {"TagUTCTime", Const, 6}, + {"TagUTF8String", Const, 6}, + {"Unmarshal", Func, 0}, + {"UnmarshalWithParams", Func, 0}, + }, + "encoding/base32": { + {"(*Encoding).AppendDecode", Method, 22}, + {"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).WithPadding", Method, 9}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"HexEncoding", Var, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 9}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 9}, + }, + "encoding/base64": { + {"(*Encoding).AppendDecode", Method, 22}, + 
{"(*Encoding).AppendEncode", Method, 22}, + {"(*Encoding).Decode", Method, 0}, + {"(*Encoding).DecodeString", Method, 0}, + {"(*Encoding).DecodedLen", Method, 0}, + {"(*Encoding).Encode", Method, 0}, + {"(*Encoding).EncodeToString", Method, 0}, + {"(*Encoding).EncodedLen", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(Encoding).Strict", Method, 8}, + {"(Encoding).WithPadding", Method, 5}, + {"CorruptInputError", Type, 0}, + {"Encoding", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewEncoding", Func, 0}, + {"NoPadding", Const, 5}, + {"RawStdEncoding", Var, 5}, + {"RawURLEncoding", Var, 5}, + {"StdEncoding", Var, 0}, + {"StdPadding", Const, 5}, + {"URLEncoding", Var, 0}, + }, + "encoding/binary": { + {"AppendByteOrder", Type, 19}, + {"AppendUvarint", Func, 19}, + {"AppendVarint", Func, 19}, + {"BigEndian", Var, 0}, + {"ByteOrder", Type, 0}, + {"LittleEndian", Var, 0}, + {"MaxVarintLen16", Const, 0}, + {"MaxVarintLen32", Const, 0}, + {"MaxVarintLen64", Const, 0}, + {"NativeEndian", Var, 21}, + {"PutUvarint", Func, 0}, + {"PutVarint", Func, 0}, + {"Read", Func, 0}, + {"ReadUvarint", Func, 0}, + {"ReadVarint", Func, 0}, + {"Size", Func, 0}, + {"Uvarint", Func, 0}, + {"Varint", Func, 0}, + {"Write", Func, 0}, + }, + "encoding/csv": { + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Unwrap", Method, 13}, + {"(*Reader).FieldPos", Method, 17}, + {"(*Reader).InputOffset", Method, 19}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAll", Method, 0}, + {"(*Writer).Error", Method, 1}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteAll", Method, 0}, + {"ErrBareQuote", Var, 0}, + {"ErrFieldCount", Var, 0}, + {"ErrQuote", Var, 0}, + {"ErrTrailingComma", Var, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Column", Field, 0}, + {"ParseError.Err", Field, 0}, + {"ParseError.Line", Field, 0}, + {"ParseError.StartLine", Field, 10}, + {"Reader", Type, 0}, + {"Reader.Comma", Field, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.FieldsPerRecord", Field, 0}, + {"Reader.LazyQuotes", Field, 0}, + {"Reader.ReuseRecord", Field, 9}, + {"Reader.TrailingComma", Field, 0}, + {"Reader.TrimLeadingSpace", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Comma", Field, 0}, + {"Writer.UseCRLF", Field, 0}, + }, + "encoding/gob": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeValue", Method, 0}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeValue", Method, 0}, + {"CommonType", Type, 0}, + {"CommonType.Id", Field, 0}, + {"CommonType.Name", Field, 0}, + {"Decoder", Type, 0}, + {"Encoder", Type, 0}, + {"GobDecoder", Type, 0}, + {"GobEncoder", Type, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + }, + "encoding/hex": { + {"(InvalidByteError).Error", Method, 0}, + {"AppendDecode", Func, 22}, + {"AppendEncode", Func, 22}, + {"Decode", Func, 0}, + {"DecodeString", Func, 0}, + {"DecodedLen", Func, 0}, + {"Dump", Func, 0}, + {"Dumper", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToString", Func, 0}, + {"EncodedLen", Func, 0}, + {"ErrLength", Var, 0}, + {"InvalidByteError", Type, 0}, + {"NewDecoder", Func, 10}, + {"NewEncoder", Func, 10}, + }, + "encoding/json": { + {"(*Decoder).Buffered", Method, 1}, + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DisallowUnknownFields", Method, 10}, + {"(*Decoder).InputOffset", Method, 14}, + {"(*Decoder).More", Method, 5}, + {"(*Decoder).Token", Method, 5}, + 
{"(*Decoder).UseNumber", Method, 1}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).SetEscapeHTML", Method, 7}, + {"(*Encoder).SetIndent", Method, 7}, + {"(*InvalidUTF8Error).Error", Method, 0}, + {"(*InvalidUnmarshalError).Error", Method, 0}, + {"(*MarshalerError).Error", Method, 0}, + {"(*MarshalerError).Unwrap", Method, 13}, + {"(*RawMessage).MarshalJSON", Method, 0}, + {"(*RawMessage).UnmarshalJSON", Method, 0}, + {"(*SyntaxError).Error", Method, 0}, + {"(*UnmarshalFieldError).Error", Method, 0}, + {"(*UnmarshalTypeError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(*UnsupportedValueError).Error", Method, 0}, + {"(Delim).String", Method, 5}, + {"(Number).Float64", Method, 1}, + {"(Number).Int64", Method, 1}, + {"(Number).String", Method, 1}, + {"(RawMessage).MarshalJSON", Method, 8}, + {"Compact", Func, 0}, + {"Decoder", Type, 0}, + {"Delim", Type, 5}, + {"Encoder", Type, 0}, + {"HTMLEscape", Func, 0}, + {"Indent", Func, 0}, + {"InvalidUTF8Error", Type, 0}, + {"InvalidUTF8Error.S", Field, 0}, + {"InvalidUnmarshalError", Type, 0}, + {"InvalidUnmarshalError.Type", Field, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 0}, + {"MarshalerError", Type, 0}, + {"MarshalerError.Err", Field, 0}, + {"MarshalerError.Type", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"Number", Type, 1}, + {"RawMessage", Type, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Offset", Field, 0}, + {"Token", Type, 5}, + {"Unmarshal", Func, 0}, + {"UnmarshalFieldError", Type, 0}, + {"UnmarshalFieldError.Field", Field, 0}, + {"UnmarshalFieldError.Key", Field, 0}, + {"UnmarshalFieldError.Type", Field, 0}, + {"UnmarshalTypeError", Type, 0}, + {"UnmarshalTypeError.Field", Field, 8}, + {"UnmarshalTypeError.Offset", Field, 5}, + {"UnmarshalTypeError.Struct", Field, 8}, + {"UnmarshalTypeError.Type", Field, 0}, + {"UnmarshalTypeError.Value", Field, 0}, + {"Unmarshaler", Type, 0}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + {"UnsupportedValueError", Type, 0}, + {"UnsupportedValueError.Str", Field, 0}, + {"UnsupportedValueError.Value", Field, 0}, + {"Valid", Func, 9}, + }, + "encoding/pem": { + {"Block", Type, 0}, + {"Block.Bytes", Field, 0}, + {"Block.Headers", Field, 0}, + {"Block.Type", Field, 0}, + {"Decode", Func, 0}, + {"Encode", Func, 0}, + {"EncodeToMemory", Func, 0}, + }, + "encoding/xml": { + {"(*Decoder).Decode", Method, 0}, + {"(*Decoder).DecodeElement", Method, 0}, + {"(*Decoder).InputOffset", Method, 4}, + {"(*Decoder).InputPos", Method, 19}, + {"(*Decoder).RawToken", Method, 0}, + {"(*Decoder).Skip", Method, 0}, + {"(*Decoder).Token", Method, 0}, + {"(*Encoder).Close", Method, 20}, + {"(*Encoder).Encode", Method, 0}, + {"(*Encoder).EncodeElement", Method, 2}, + {"(*Encoder).EncodeToken", Method, 2}, + {"(*Encoder).Flush", Method, 2}, + {"(*Encoder).Indent", Method, 1}, + {"(*SyntaxError).Error", Method, 0}, + {"(*TagPathError).Error", Method, 0}, + {"(*UnsupportedTypeError).Error", Method, 0}, + {"(CharData).Copy", Method, 0}, + {"(Comment).Copy", Method, 0}, + {"(Directive).Copy", Method, 0}, + {"(ProcInst).Copy", Method, 0}, + {"(StartElement).Copy", Method, 0}, + {"(StartElement).End", Method, 2}, + {"(UnmarshalError).Error", Method, 0}, + {"Attr", Type, 0}, + {"Attr.Name", Field, 0}, + {"Attr.Value", Field, 0}, + {"CharData", Type, 0}, + {"Comment", Type, 0}, + {"CopyToken", Func, 0}, + {"Decoder", Type, 0}, + {"Decoder.AutoClose", Field, 0}, + {"Decoder.CharsetReader", Field, 0}, + 
{"Decoder.DefaultSpace", Field, 1}, + {"Decoder.Entity", Field, 0}, + {"Decoder.Strict", Field, 0}, + {"Directive", Type, 0}, + {"Encoder", Type, 0}, + {"EndElement", Type, 0}, + {"EndElement.Name", Field, 0}, + {"Escape", Func, 0}, + {"EscapeText", Func, 1}, + {"HTMLAutoClose", Var, 0}, + {"HTMLEntity", Var, 0}, + {"Header", Const, 0}, + {"Marshal", Func, 0}, + {"MarshalIndent", Func, 0}, + {"Marshaler", Type, 2}, + {"MarshalerAttr", Type, 2}, + {"Name", Type, 0}, + {"Name.Local", Field, 0}, + {"Name.Space", Field, 0}, + {"NewDecoder", Func, 0}, + {"NewEncoder", Func, 0}, + {"NewTokenDecoder", Func, 10}, + {"ProcInst", Type, 0}, + {"ProcInst.Inst", Field, 0}, + {"ProcInst.Target", Field, 0}, + {"StartElement", Type, 0}, + {"StartElement.Attr", Field, 0}, + {"StartElement.Name", Field, 0}, + {"SyntaxError", Type, 0}, + {"SyntaxError.Line", Field, 0}, + {"SyntaxError.Msg", Field, 0}, + {"TagPathError", Type, 0}, + {"TagPathError.Field1", Field, 0}, + {"TagPathError.Field2", Field, 0}, + {"TagPathError.Struct", Field, 0}, + {"TagPathError.Tag1", Field, 0}, + {"TagPathError.Tag2", Field, 0}, + {"Token", Type, 0}, + {"TokenReader", Type, 10}, + {"Unmarshal", Func, 0}, + {"UnmarshalError", Type, 0}, + {"Unmarshaler", Type, 2}, + {"UnmarshalerAttr", Type, 2}, + {"UnsupportedTypeError", Type, 0}, + {"UnsupportedTypeError.Type", Field, 0}, + }, + "errors": { + {"As", Func, 13}, + {"ErrUnsupported", Var, 21}, + {"Is", Func, 13}, + {"Join", Func, 20}, + {"New", Func, 0}, + {"Unwrap", Func, 13}, + }, + "expvar": { + {"(*Float).Add", Method, 0}, + {"(*Float).Set", Method, 0}, + {"(*Float).String", Method, 0}, + {"(*Float).Value", Method, 8}, + {"(*Int).Add", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).String", Method, 0}, + {"(*Int).Value", Method, 8}, + {"(*Map).Add", Method, 0}, + {"(*Map).AddFloat", Method, 0}, + {"(*Map).Delete", Method, 12}, + {"(*Map).Do", Method, 0}, + {"(*Map).Get", Method, 0}, + {"(*Map).Init", Method, 0}, + {"(*Map).Set", Method, 0}, + {"(*Map).String", Method, 0}, + {"(*String).Set", Method, 0}, + {"(*String).String", Method, 0}, + {"(*String).Value", Method, 8}, + {"(Func).String", Method, 0}, + {"(Func).Value", Method, 8}, + {"Do", Func, 0}, + {"Float", Type, 0}, + {"Func", Type, 0}, + {"Get", Func, 0}, + {"Handler", Func, 8}, + {"Int", Type, 0}, + {"KeyValue", Type, 0}, + {"KeyValue.Key", Field, 0}, + {"KeyValue.Value", Field, 0}, + {"Map", Type, 0}, + {"NewFloat", Func, 0}, + {"NewInt", Func, 0}, + {"NewMap", Func, 0}, + {"NewString", Func, 0}, + {"Publish", Func, 0}, + {"String", Type, 0}, + {"Var", Type, 0}, + }, + "flag": { + {"(*FlagSet).Arg", Method, 0}, + {"(*FlagSet).Args", Method, 0}, + {"(*FlagSet).Bool", Method, 0}, + {"(*FlagSet).BoolFunc", Method, 21}, + {"(*FlagSet).BoolVar", Method, 0}, + {"(*FlagSet).Duration", Method, 0}, + {"(*FlagSet).DurationVar", Method, 0}, + {"(*FlagSet).ErrorHandling", Method, 10}, + {"(*FlagSet).Float64", Method, 0}, + {"(*FlagSet).Float64Var", Method, 0}, + {"(*FlagSet).Func", Method, 16}, + {"(*FlagSet).Init", Method, 0}, + {"(*FlagSet).Int", Method, 0}, + {"(*FlagSet).Int64", Method, 0}, + {"(*FlagSet).Int64Var", Method, 0}, + {"(*FlagSet).IntVar", Method, 0}, + {"(*FlagSet).Lookup", Method, 0}, + {"(*FlagSet).NArg", Method, 0}, + {"(*FlagSet).NFlag", Method, 0}, + {"(*FlagSet).Name", Method, 10}, + {"(*FlagSet).Output", Method, 10}, + {"(*FlagSet).Parse", Method, 0}, + {"(*FlagSet).Parsed", Method, 0}, + {"(*FlagSet).PrintDefaults", Method, 0}, + {"(*FlagSet).Set", Method, 0}, + {"(*FlagSet).SetOutput", Method, 0}, + 
{"(*FlagSet).String", Method, 0}, + {"(*FlagSet).StringVar", Method, 0}, + {"(*FlagSet).TextVar", Method, 19}, + {"(*FlagSet).Uint", Method, 0}, + {"(*FlagSet).Uint64", Method, 0}, + {"(*FlagSet).Uint64Var", Method, 0}, + {"(*FlagSet).UintVar", Method, 0}, + {"(*FlagSet).Var", Method, 0}, + {"(*FlagSet).Visit", Method, 0}, + {"(*FlagSet).VisitAll", Method, 0}, + {"Arg", Func, 0}, + {"Args", Func, 0}, + {"Bool", Func, 0}, + {"BoolFunc", Func, 21}, + {"BoolVar", Func, 0}, + {"CommandLine", Var, 2}, + {"ContinueOnError", Const, 0}, + {"Duration", Func, 0}, + {"DurationVar", Func, 0}, + {"ErrHelp", Var, 0}, + {"ErrorHandling", Type, 0}, + {"ExitOnError", Const, 0}, + {"Flag", Type, 0}, + {"Flag.DefValue", Field, 0}, + {"Flag.Name", Field, 0}, + {"Flag.Usage", Field, 0}, + {"Flag.Value", Field, 0}, + {"FlagSet", Type, 0}, + {"FlagSet.Usage", Field, 0}, + {"Float64", Func, 0}, + {"Float64Var", Func, 0}, + {"Func", Func, 16}, + {"Getter", Type, 2}, + {"Int", Func, 0}, + {"Int64", Func, 0}, + {"Int64Var", Func, 0}, + {"IntVar", Func, 0}, + {"Lookup", Func, 0}, + {"NArg", Func, 0}, + {"NFlag", Func, 0}, + {"NewFlagSet", Func, 0}, + {"PanicOnError", Const, 0}, + {"Parse", Func, 0}, + {"Parsed", Func, 0}, + {"PrintDefaults", Func, 0}, + {"Set", Func, 0}, + {"String", Func, 0}, + {"StringVar", Func, 0}, + {"TextVar", Func, 19}, + {"Uint", Func, 0}, + {"Uint64", Func, 0}, + {"Uint64Var", Func, 0}, + {"UintVar", Func, 0}, + {"UnquoteUsage", Func, 5}, + {"Usage", Var, 0}, + {"Value", Type, 0}, + {"Var", Func, 0}, + {"Visit", Func, 0}, + {"VisitAll", Func, 0}, + }, + "fmt": { + {"Append", Func, 19}, + {"Appendf", Func, 19}, + {"Appendln", Func, 19}, + {"Errorf", Func, 0}, + {"FormatString", Func, 20}, + {"Formatter", Type, 0}, + {"Fprint", Func, 0}, + {"Fprintf", Func, 0}, + {"Fprintln", Func, 0}, + {"Fscan", Func, 0}, + {"Fscanf", Func, 0}, + {"Fscanln", Func, 0}, + {"GoStringer", Type, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"Scan", Func, 0}, + {"ScanState", Type, 0}, + {"Scanf", Func, 0}, + {"Scanln", Func, 0}, + {"Scanner", Type, 0}, + {"Sprint", Func, 0}, + {"Sprintf", Func, 0}, + {"Sprintln", Func, 0}, + {"Sscan", Func, 0}, + {"Sscanf", Func, 0}, + {"Sscanln", Func, 0}, + {"State", Type, 0}, + {"Stringer", Type, 0}, + }, + "go/ast": { + {"(*ArrayType).End", Method, 0}, + {"(*ArrayType).Pos", Method, 0}, + {"(*AssignStmt).End", Method, 0}, + {"(*AssignStmt).Pos", Method, 0}, + {"(*BadDecl).End", Method, 0}, + {"(*BadDecl).Pos", Method, 0}, + {"(*BadExpr).End", Method, 0}, + {"(*BadExpr).Pos", Method, 0}, + {"(*BadStmt).End", Method, 0}, + {"(*BadStmt).Pos", Method, 0}, + {"(*BasicLit).End", Method, 0}, + {"(*BasicLit).Pos", Method, 0}, + {"(*BinaryExpr).End", Method, 0}, + {"(*BinaryExpr).Pos", Method, 0}, + {"(*BlockStmt).End", Method, 0}, + {"(*BlockStmt).Pos", Method, 0}, + {"(*BranchStmt).End", Method, 0}, + {"(*BranchStmt).Pos", Method, 0}, + {"(*CallExpr).End", Method, 0}, + {"(*CallExpr).Pos", Method, 0}, + {"(*CaseClause).End", Method, 0}, + {"(*CaseClause).Pos", Method, 0}, + {"(*ChanType).End", Method, 0}, + {"(*ChanType).Pos", Method, 0}, + {"(*CommClause).End", Method, 0}, + {"(*CommClause).Pos", Method, 0}, + {"(*Comment).End", Method, 0}, + {"(*Comment).Pos", Method, 0}, + {"(*CommentGroup).End", Method, 0}, + {"(*CommentGroup).Pos", Method, 0}, + {"(*CommentGroup).Text", Method, 0}, + {"(*CompositeLit).End", Method, 0}, + {"(*CompositeLit).Pos", Method, 0}, + {"(*DeclStmt).End", Method, 0}, + {"(*DeclStmt).Pos", Method, 0}, + {"(*DeferStmt).End", 
Method, 0}, + {"(*DeferStmt).Pos", Method, 0}, + {"(*Ellipsis).End", Method, 0}, + {"(*Ellipsis).Pos", Method, 0}, + {"(*EmptyStmt).End", Method, 0}, + {"(*EmptyStmt).Pos", Method, 0}, + {"(*ExprStmt).End", Method, 0}, + {"(*ExprStmt).Pos", Method, 0}, + {"(*Field).End", Method, 0}, + {"(*Field).Pos", Method, 0}, + {"(*FieldList).End", Method, 0}, + {"(*FieldList).NumFields", Method, 0}, + {"(*FieldList).Pos", Method, 0}, + {"(*File).End", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*ForStmt).End", Method, 0}, + {"(*ForStmt).Pos", Method, 0}, + {"(*FuncDecl).End", Method, 0}, + {"(*FuncDecl).Pos", Method, 0}, + {"(*FuncLit).End", Method, 0}, + {"(*FuncLit).Pos", Method, 0}, + {"(*FuncType).End", Method, 0}, + {"(*FuncType).Pos", Method, 0}, + {"(*GenDecl).End", Method, 0}, + {"(*GenDecl).Pos", Method, 0}, + {"(*GoStmt).End", Method, 0}, + {"(*GoStmt).Pos", Method, 0}, + {"(*Ident).End", Method, 0}, + {"(*Ident).IsExported", Method, 0}, + {"(*Ident).Pos", Method, 0}, + {"(*Ident).String", Method, 0}, + {"(*IfStmt).End", Method, 0}, + {"(*IfStmt).Pos", Method, 0}, + {"(*ImportSpec).End", Method, 0}, + {"(*ImportSpec).Pos", Method, 0}, + {"(*IncDecStmt).End", Method, 0}, + {"(*IncDecStmt).Pos", Method, 0}, + {"(*IndexExpr).End", Method, 0}, + {"(*IndexExpr).Pos", Method, 0}, + {"(*IndexListExpr).End", Method, 18}, + {"(*IndexListExpr).Pos", Method, 18}, + {"(*InterfaceType).End", Method, 0}, + {"(*InterfaceType).Pos", Method, 0}, + {"(*KeyValueExpr).End", Method, 0}, + {"(*KeyValueExpr).Pos", Method, 0}, + {"(*LabeledStmt).End", Method, 0}, + {"(*LabeledStmt).Pos", Method, 0}, + {"(*MapType).End", Method, 0}, + {"(*MapType).Pos", Method, 0}, + {"(*Object).Pos", Method, 0}, + {"(*Package).End", Method, 0}, + {"(*Package).Pos", Method, 0}, + {"(*ParenExpr).End", Method, 0}, + {"(*ParenExpr).Pos", Method, 0}, + {"(*RangeStmt).End", Method, 0}, + {"(*RangeStmt).Pos", Method, 0}, + {"(*ReturnStmt).End", Method, 0}, + {"(*ReturnStmt).Pos", Method, 0}, + {"(*Scope).Insert", Method, 0}, + {"(*Scope).Lookup", Method, 0}, + {"(*Scope).String", Method, 0}, + {"(*SelectStmt).End", Method, 0}, + {"(*SelectStmt).Pos", Method, 0}, + {"(*SelectorExpr).End", Method, 0}, + {"(*SelectorExpr).Pos", Method, 0}, + {"(*SendStmt).End", Method, 0}, + {"(*SendStmt).Pos", Method, 0}, + {"(*SliceExpr).End", Method, 0}, + {"(*SliceExpr).Pos", Method, 0}, + {"(*StarExpr).End", Method, 0}, + {"(*StarExpr).Pos", Method, 0}, + {"(*StructType).End", Method, 0}, + {"(*StructType).Pos", Method, 0}, + {"(*SwitchStmt).End", Method, 0}, + {"(*SwitchStmt).Pos", Method, 0}, + {"(*TypeAssertExpr).End", Method, 0}, + {"(*TypeAssertExpr).Pos", Method, 0}, + {"(*TypeSpec).End", Method, 0}, + {"(*TypeSpec).Pos", Method, 0}, + {"(*TypeSwitchStmt).End", Method, 0}, + {"(*TypeSwitchStmt).Pos", Method, 0}, + {"(*UnaryExpr).End", Method, 0}, + {"(*UnaryExpr).Pos", Method, 0}, + {"(*ValueSpec).End", Method, 0}, + {"(*ValueSpec).Pos", Method, 0}, + {"(CommentMap).Comments", Method, 1}, + {"(CommentMap).Filter", Method, 1}, + {"(CommentMap).String", Method, 1}, + {"(CommentMap).Update", Method, 1}, + {"(ObjKind).String", Method, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.Elt", Field, 0}, + {"ArrayType.Lbrack", Field, 0}, + {"ArrayType.Len", Field, 0}, + {"AssignStmt", Type, 0}, + {"AssignStmt.Lhs", Field, 0}, + {"AssignStmt.Rhs", Field, 0}, + {"AssignStmt.Tok", Field, 0}, + {"AssignStmt.TokPos", Field, 0}, + {"Bad", Const, 0}, + {"BadDecl", Type, 0}, + {"BadDecl.From", Field, 0}, + {"BadDecl.To", Field, 0}, + {"BadExpr", Type, 0}, + 
{"BadExpr.From", Field, 0}, + {"BadExpr.To", Field, 0}, + {"BadStmt", Type, 0}, + {"BadStmt.From", Field, 0}, + {"BadStmt.To", Field, 0}, + {"BasicLit", Type, 0}, + {"BasicLit.Kind", Field, 0}, + {"BasicLit.Value", Field, 0}, + {"BasicLit.ValuePos", Field, 0}, + {"BinaryExpr", Type, 0}, + {"BinaryExpr.Op", Field, 0}, + {"BinaryExpr.OpPos", Field, 0}, + {"BinaryExpr.X", Field, 0}, + {"BinaryExpr.Y", Field, 0}, + {"BlockStmt", Type, 0}, + {"BlockStmt.Lbrace", Field, 0}, + {"BlockStmt.List", Field, 0}, + {"BlockStmt.Rbrace", Field, 0}, + {"BranchStmt", Type, 0}, + {"BranchStmt.Label", Field, 0}, + {"BranchStmt.Tok", Field, 0}, + {"BranchStmt.TokPos", Field, 0}, + {"CallExpr", Type, 0}, + {"CallExpr.Args", Field, 0}, + {"CallExpr.Ellipsis", Field, 0}, + {"CallExpr.Fun", Field, 0}, + {"CallExpr.Lparen", Field, 0}, + {"CallExpr.Rparen", Field, 0}, + {"CaseClause", Type, 0}, + {"CaseClause.Body", Field, 0}, + {"CaseClause.Case", Field, 0}, + {"CaseClause.Colon", Field, 0}, + {"CaseClause.List", Field, 0}, + {"ChanDir", Type, 0}, + {"ChanType", Type, 0}, + {"ChanType.Arrow", Field, 1}, + {"ChanType.Begin", Field, 0}, + {"ChanType.Dir", Field, 0}, + {"ChanType.Value", Field, 0}, + {"CommClause", Type, 0}, + {"CommClause.Body", Field, 0}, + {"CommClause.Case", Field, 0}, + {"CommClause.Colon", Field, 0}, + {"CommClause.Comm", Field, 0}, + {"Comment", Type, 0}, + {"Comment.Slash", Field, 0}, + {"Comment.Text", Field, 0}, + {"CommentGroup", Type, 0}, + {"CommentGroup.List", Field, 0}, + {"CommentMap", Type, 1}, + {"CompositeLit", Type, 0}, + {"CompositeLit.Elts", Field, 0}, + {"CompositeLit.Incomplete", Field, 11}, + {"CompositeLit.Lbrace", Field, 0}, + {"CompositeLit.Rbrace", Field, 0}, + {"CompositeLit.Type", Field, 0}, + {"Con", Const, 0}, + {"Decl", Type, 0}, + {"DeclStmt", Type, 0}, + {"DeclStmt.Decl", Field, 0}, + {"DeferStmt", Type, 0}, + {"DeferStmt.Call", Field, 0}, + {"DeferStmt.Defer", Field, 0}, + {"Ellipsis", Type, 0}, + {"Ellipsis.Ellipsis", Field, 0}, + {"Ellipsis.Elt", Field, 0}, + {"EmptyStmt", Type, 0}, + {"EmptyStmt.Implicit", Field, 5}, + {"EmptyStmt.Semicolon", Field, 0}, + {"Expr", Type, 0}, + {"ExprStmt", Type, 0}, + {"ExprStmt.X", Field, 0}, + {"Field", Type, 0}, + {"Field.Comment", Field, 0}, + {"Field.Doc", Field, 0}, + {"Field.Names", Field, 0}, + {"Field.Tag", Field, 0}, + {"Field.Type", Field, 0}, + {"FieldFilter", Type, 0}, + {"FieldList", Type, 0}, + {"FieldList.Closing", Field, 0}, + {"FieldList.List", Field, 0}, + {"FieldList.Opening", Field, 0}, + {"File", Type, 0}, + {"File.Comments", Field, 0}, + {"File.Decls", Field, 0}, + {"File.Doc", Field, 0}, + {"File.FileEnd", Field, 20}, + {"File.FileStart", Field, 20}, + {"File.GoVersion", Field, 21}, + {"File.Imports", Field, 0}, + {"File.Name", Field, 0}, + {"File.Package", Field, 0}, + {"File.Scope", Field, 0}, + {"File.Unresolved", Field, 0}, + {"FileExports", Func, 0}, + {"Filter", Type, 0}, + {"FilterDecl", Func, 0}, + {"FilterFile", Func, 0}, + {"FilterFuncDuplicates", Const, 0}, + {"FilterImportDuplicates", Const, 0}, + {"FilterPackage", Func, 0}, + {"FilterUnassociatedComments", Const, 0}, + {"ForStmt", Type, 0}, + {"ForStmt.Body", Field, 0}, + {"ForStmt.Cond", Field, 0}, + {"ForStmt.For", Field, 0}, + {"ForStmt.Init", Field, 0}, + {"ForStmt.Post", Field, 0}, + {"Fprint", Func, 0}, + {"Fun", Const, 0}, + {"FuncDecl", Type, 0}, + {"FuncDecl.Body", Field, 0}, + {"FuncDecl.Doc", Field, 0}, + {"FuncDecl.Name", Field, 0}, + {"FuncDecl.Recv", Field, 0}, + {"FuncDecl.Type", Field, 0}, + {"FuncLit", Type, 0}, + 
{"FuncLit.Body", Field, 0}, + {"FuncLit.Type", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.Func", Field, 0}, + {"FuncType.Params", Field, 0}, + {"FuncType.Results", Field, 0}, + {"FuncType.TypeParams", Field, 18}, + {"GenDecl", Type, 0}, + {"GenDecl.Doc", Field, 0}, + {"GenDecl.Lparen", Field, 0}, + {"GenDecl.Rparen", Field, 0}, + {"GenDecl.Specs", Field, 0}, + {"GenDecl.Tok", Field, 0}, + {"GenDecl.TokPos", Field, 0}, + {"GoStmt", Type, 0}, + {"GoStmt.Call", Field, 0}, + {"GoStmt.Go", Field, 0}, + {"Ident", Type, 0}, + {"Ident.Name", Field, 0}, + {"Ident.NamePos", Field, 0}, + {"Ident.Obj", Field, 0}, + {"IfStmt", Type, 0}, + {"IfStmt.Body", Field, 0}, + {"IfStmt.Cond", Field, 0}, + {"IfStmt.Else", Field, 0}, + {"IfStmt.If", Field, 0}, + {"IfStmt.Init", Field, 0}, + {"ImportSpec", Type, 0}, + {"ImportSpec.Comment", Field, 0}, + {"ImportSpec.Doc", Field, 0}, + {"ImportSpec.EndPos", Field, 0}, + {"ImportSpec.Name", Field, 0}, + {"ImportSpec.Path", Field, 0}, + {"Importer", Type, 0}, + {"IncDecStmt", Type, 0}, + {"IncDecStmt.Tok", Field, 0}, + {"IncDecStmt.TokPos", Field, 0}, + {"IncDecStmt.X", Field, 0}, + {"IndexExpr", Type, 0}, + {"IndexExpr.Index", Field, 0}, + {"IndexExpr.Lbrack", Field, 0}, + {"IndexExpr.Rbrack", Field, 0}, + {"IndexExpr.X", Field, 0}, + {"IndexListExpr", Type, 18}, + {"IndexListExpr.Indices", Field, 18}, + {"IndexListExpr.Lbrack", Field, 18}, + {"IndexListExpr.Rbrack", Field, 18}, + {"IndexListExpr.X", Field, 18}, + {"Inspect", Func, 0}, + {"InterfaceType", Type, 0}, + {"InterfaceType.Incomplete", Field, 0}, + {"InterfaceType.Interface", Field, 0}, + {"InterfaceType.Methods", Field, 0}, + {"IsExported", Func, 0}, + {"IsGenerated", Func, 21}, + {"KeyValueExpr", Type, 0}, + {"KeyValueExpr.Colon", Field, 0}, + {"KeyValueExpr.Key", Field, 0}, + {"KeyValueExpr.Value", Field, 0}, + {"LabeledStmt", Type, 0}, + {"LabeledStmt.Colon", Field, 0}, + {"LabeledStmt.Label", Field, 0}, + {"LabeledStmt.Stmt", Field, 0}, + {"Lbl", Const, 0}, + {"MapType", Type, 0}, + {"MapType.Key", Field, 0}, + {"MapType.Map", Field, 0}, + {"MapType.Value", Field, 0}, + {"MergeMode", Type, 0}, + {"MergePackageFiles", Func, 0}, + {"NewCommentMap", Func, 1}, + {"NewIdent", Func, 0}, + {"NewObj", Func, 0}, + {"NewPackage", Func, 0}, + {"NewScope", Func, 0}, + {"Node", Type, 0}, + {"NotNilFilter", Func, 0}, + {"ObjKind", Type, 0}, + {"Object", Type, 0}, + {"Object.Data", Field, 0}, + {"Object.Decl", Field, 0}, + {"Object.Kind", Field, 0}, + {"Object.Name", Field, 0}, + {"Object.Type", Field, 0}, + {"Package", Type, 0}, + {"Package.Files", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Scope", Field, 0}, + {"PackageExports", Func, 0}, + {"ParenExpr", Type, 0}, + {"ParenExpr.Lparen", Field, 0}, + {"ParenExpr.Rparen", Field, 0}, + {"ParenExpr.X", Field, 0}, + {"Pkg", Const, 0}, + {"Print", Func, 0}, + {"RECV", Const, 0}, + {"RangeStmt", Type, 0}, + {"RangeStmt.Body", Field, 0}, + {"RangeStmt.For", Field, 0}, + {"RangeStmt.Key", Field, 0}, + {"RangeStmt.Range", Field, 20}, + {"RangeStmt.Tok", Field, 0}, + {"RangeStmt.TokPos", Field, 0}, + {"RangeStmt.Value", Field, 0}, + {"RangeStmt.X", Field, 0}, + {"ReturnStmt", Type, 0}, + {"ReturnStmt.Results", Field, 0}, + {"ReturnStmt.Return", Field, 0}, + {"SEND", Const, 0}, + {"Scope", Type, 0}, + {"Scope.Objects", Field, 0}, + {"Scope.Outer", Field, 0}, + {"SelectStmt", Type, 0}, + {"SelectStmt.Body", Field, 0}, + {"SelectStmt.Select", Field, 0}, + {"SelectorExpr", Type, 0}, + {"SelectorExpr.Sel", Field, 0}, + 
{"SelectorExpr.X", Field, 0}, + {"SendStmt", Type, 0}, + {"SendStmt.Arrow", Field, 0}, + {"SendStmt.Chan", Field, 0}, + {"SendStmt.Value", Field, 0}, + {"SliceExpr", Type, 0}, + {"SliceExpr.High", Field, 0}, + {"SliceExpr.Lbrack", Field, 0}, + {"SliceExpr.Low", Field, 0}, + {"SliceExpr.Max", Field, 2}, + {"SliceExpr.Rbrack", Field, 0}, + {"SliceExpr.Slice3", Field, 2}, + {"SliceExpr.X", Field, 0}, + {"SortImports", Func, 0}, + {"Spec", Type, 0}, + {"StarExpr", Type, 0}, + {"StarExpr.Star", Field, 0}, + {"StarExpr.X", Field, 0}, + {"Stmt", Type, 0}, + {"StructType", Type, 0}, + {"StructType.Fields", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Struct", Field, 0}, + {"SwitchStmt", Type, 0}, + {"SwitchStmt.Body", Field, 0}, + {"SwitchStmt.Init", Field, 0}, + {"SwitchStmt.Switch", Field, 0}, + {"SwitchStmt.Tag", Field, 0}, + {"Typ", Const, 0}, + {"TypeAssertExpr", Type, 0}, + {"TypeAssertExpr.Lparen", Field, 2}, + {"TypeAssertExpr.Rparen", Field, 2}, + {"TypeAssertExpr.Type", Field, 0}, + {"TypeAssertExpr.X", Field, 0}, + {"TypeSpec", Type, 0}, + {"TypeSpec.Assign", Field, 9}, + {"TypeSpec.Comment", Field, 0}, + {"TypeSpec.Doc", Field, 0}, + {"TypeSpec.Name", Field, 0}, + {"TypeSpec.Type", Field, 0}, + {"TypeSpec.TypeParams", Field, 18}, + {"TypeSwitchStmt", Type, 0}, + {"TypeSwitchStmt.Assign", Field, 0}, + {"TypeSwitchStmt.Body", Field, 0}, + {"TypeSwitchStmt.Init", Field, 0}, + {"TypeSwitchStmt.Switch", Field, 0}, + {"UnaryExpr", Type, 0}, + {"UnaryExpr.Op", Field, 0}, + {"UnaryExpr.OpPos", Field, 0}, + {"UnaryExpr.X", Field, 0}, + {"Unparen", Func, 22}, + {"ValueSpec", Type, 0}, + {"ValueSpec.Comment", Field, 0}, + {"ValueSpec.Doc", Field, 0}, + {"ValueSpec.Names", Field, 0}, + {"ValueSpec.Type", Field, 0}, + {"ValueSpec.Values", Field, 0}, + {"Var", Const, 0}, + {"Visitor", Type, 0}, + {"Walk", Func, 0}, + }, + "go/build": { + {"(*Context).Import", Method, 0}, + {"(*Context).ImportDir", Method, 0}, + {"(*Context).MatchFile", Method, 2}, + {"(*Context).SrcDirs", Method, 0}, + {"(*MultiplePackageError).Error", Method, 4}, + {"(*NoGoError).Error", Method, 0}, + {"(*Package).IsCommand", Method, 0}, + {"AllowBinary", Const, 0}, + {"ArchChar", Func, 0}, + {"Context", Type, 0}, + {"Context.BuildTags", Field, 0}, + {"Context.CgoEnabled", Field, 0}, + {"Context.Compiler", Field, 0}, + {"Context.Dir", Field, 14}, + {"Context.GOARCH", Field, 0}, + {"Context.GOOS", Field, 0}, + {"Context.GOPATH", Field, 0}, + {"Context.GOROOT", Field, 0}, + {"Context.HasSubdir", Field, 0}, + {"Context.InstallSuffix", Field, 1}, + {"Context.IsAbsPath", Field, 0}, + {"Context.IsDir", Field, 0}, + {"Context.JoinPath", Field, 0}, + {"Context.OpenFile", Field, 0}, + {"Context.ReadDir", Field, 0}, + {"Context.ReleaseTags", Field, 1}, + {"Context.SplitPathList", Field, 0}, + {"Context.ToolTags", Field, 17}, + {"Context.UseAllFiles", Field, 0}, + {"Default", Var, 0}, + {"Directive", Type, 21}, + {"Directive.Pos", Field, 21}, + {"Directive.Text", Field, 21}, + {"FindOnly", Const, 0}, + {"IgnoreVendor", Const, 6}, + {"Import", Func, 0}, + {"ImportComment", Const, 4}, + {"ImportDir", Func, 0}, + {"ImportMode", Type, 0}, + {"IsLocalImport", Func, 0}, + {"MultiplePackageError", Type, 4}, + {"MultiplePackageError.Dir", Field, 4}, + {"MultiplePackageError.Files", Field, 4}, + {"MultiplePackageError.Packages", Field, 4}, + {"NoGoError", Type, 0}, + {"NoGoError.Dir", Field, 0}, + {"Package", Type, 0}, + {"Package.AllTags", Field, 2}, + {"Package.BinDir", Field, 0}, + {"Package.BinaryOnly", Field, 7}, + 
{"Package.CFiles", Field, 0}, + {"Package.CXXFiles", Field, 2}, + {"Package.CgoCFLAGS", Field, 0}, + {"Package.CgoCPPFLAGS", Field, 2}, + {"Package.CgoCXXFLAGS", Field, 2}, + {"Package.CgoFFLAGS", Field, 7}, + {"Package.CgoFiles", Field, 0}, + {"Package.CgoLDFLAGS", Field, 0}, + {"Package.CgoPkgConfig", Field, 0}, + {"Package.ConflictDir", Field, 2}, + {"Package.Dir", Field, 0}, + {"Package.Directives", Field, 21}, + {"Package.Doc", Field, 0}, + {"Package.EmbedPatternPos", Field, 16}, + {"Package.EmbedPatterns", Field, 16}, + {"Package.FFiles", Field, 7}, + {"Package.GoFiles", Field, 0}, + {"Package.Goroot", Field, 0}, + {"Package.HFiles", Field, 0}, + {"Package.IgnoredGoFiles", Field, 1}, + {"Package.IgnoredOtherFiles", Field, 16}, + {"Package.ImportComment", Field, 4}, + {"Package.ImportPath", Field, 0}, + {"Package.ImportPos", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.InvalidGoFiles", Field, 6}, + {"Package.MFiles", Field, 3}, + {"Package.Name", Field, 0}, + {"Package.PkgObj", Field, 0}, + {"Package.PkgRoot", Field, 0}, + {"Package.PkgTargetRoot", Field, 5}, + {"Package.Root", Field, 0}, + {"Package.SFiles", Field, 0}, + {"Package.SrcRoot", Field, 0}, + {"Package.SwigCXXFiles", Field, 1}, + {"Package.SwigFiles", Field, 1}, + {"Package.SysoFiles", Field, 0}, + {"Package.TestDirectives", Field, 21}, + {"Package.TestEmbedPatternPos", Field, 16}, + {"Package.TestEmbedPatterns", Field, 16}, + {"Package.TestGoFiles", Field, 0}, + {"Package.TestImportPos", Field, 0}, + {"Package.TestImports", Field, 0}, + {"Package.XTestDirectives", Field, 21}, + {"Package.XTestEmbedPatternPos", Field, 16}, + {"Package.XTestEmbedPatterns", Field, 16}, + {"Package.XTestGoFiles", Field, 0}, + {"Package.XTestImportPos", Field, 0}, + {"Package.XTestImports", Field, 0}, + {"ToolDir", Var, 0}, + }, + "go/build/constraint": { + {"(*AndExpr).Eval", Method, 16}, + {"(*AndExpr).String", Method, 16}, + {"(*NotExpr).Eval", Method, 16}, + {"(*NotExpr).String", Method, 16}, + {"(*OrExpr).Eval", Method, 16}, + {"(*OrExpr).String", Method, 16}, + {"(*SyntaxError).Error", Method, 16}, + {"(*TagExpr).Eval", Method, 16}, + {"(*TagExpr).String", Method, 16}, + {"AndExpr", Type, 16}, + {"AndExpr.X", Field, 16}, + {"AndExpr.Y", Field, 16}, + {"Expr", Type, 16}, + {"GoVersion", Func, 21}, + {"IsGoBuild", Func, 16}, + {"IsPlusBuild", Func, 16}, + {"NotExpr", Type, 16}, + {"NotExpr.X", Field, 16}, + {"OrExpr", Type, 16}, + {"OrExpr.X", Field, 16}, + {"OrExpr.Y", Field, 16}, + {"Parse", Func, 16}, + {"PlusBuildLines", Func, 16}, + {"SyntaxError", Type, 16}, + {"SyntaxError.Err", Field, 16}, + {"SyntaxError.Offset", Field, 16}, + {"TagExpr", Type, 16}, + {"TagExpr.Tag", Field, 16}, + }, + "go/constant": { + {"(Kind).String", Method, 18}, + {"BinaryOp", Func, 5}, + {"BitLen", Func, 5}, + {"Bool", Const, 5}, + {"BoolVal", Func, 5}, + {"Bytes", Func, 5}, + {"Compare", Func, 5}, + {"Complex", Const, 5}, + {"Denom", Func, 5}, + {"Float", Const, 5}, + {"Float32Val", Func, 5}, + {"Float64Val", Func, 5}, + {"Imag", Func, 5}, + {"Int", Const, 5}, + {"Int64Val", Func, 5}, + {"Kind", Type, 5}, + {"Make", Func, 13}, + {"MakeBool", Func, 5}, + {"MakeFloat64", Func, 5}, + {"MakeFromBytes", Func, 5}, + {"MakeFromLiteral", Func, 5}, + {"MakeImag", Func, 5}, + {"MakeInt64", Func, 5}, + {"MakeString", Func, 5}, + {"MakeUint64", Func, 5}, + {"MakeUnknown", Func, 5}, + {"Num", Func, 5}, + {"Real", Func, 5}, + {"Shift", Func, 5}, + {"Sign", Func, 5}, + {"String", Const, 5}, + {"StringVal", Func, 5}, + {"ToComplex", Func, 6}, + {"ToFloat", 
Func, 6}, + {"ToInt", Func, 6}, + {"Uint64Val", Func, 5}, + {"UnaryOp", Func, 5}, + {"Unknown", Const, 5}, + {"Val", Func, 13}, + {"Value", Type, 5}, + }, + "go/doc": { + {"(*Package).Filter", Method, 0}, + {"(*Package).HTML", Method, 19}, + {"(*Package).Markdown", Method, 19}, + {"(*Package).Parser", Method, 19}, + {"(*Package).Printer", Method, 19}, + {"(*Package).Synopsis", Method, 19}, + {"(*Package).Text", Method, 19}, + {"AllDecls", Const, 0}, + {"AllMethods", Const, 0}, + {"Example", Type, 0}, + {"Example.Code", Field, 0}, + {"Example.Comments", Field, 0}, + {"Example.Doc", Field, 0}, + {"Example.EmptyOutput", Field, 1}, + {"Example.Name", Field, 0}, + {"Example.Order", Field, 1}, + {"Example.Output", Field, 0}, + {"Example.Play", Field, 1}, + {"Example.Suffix", Field, 14}, + {"Example.Unordered", Field, 7}, + {"Examples", Func, 0}, + {"Filter", Type, 0}, + {"Func", Type, 0}, + {"Func.Decl", Field, 0}, + {"Func.Doc", Field, 0}, + {"Func.Examples", Field, 14}, + {"Func.Level", Field, 0}, + {"Func.Name", Field, 0}, + {"Func.Orig", Field, 0}, + {"Func.Recv", Field, 0}, + {"IllegalPrefixes", Var, 1}, + {"IsPredeclared", Func, 8}, + {"Mode", Type, 0}, + {"New", Func, 0}, + {"NewFromFiles", Func, 14}, + {"Note", Type, 1}, + {"Note.Body", Field, 1}, + {"Note.End", Field, 1}, + {"Note.Pos", Field, 1}, + {"Note.UID", Field, 1}, + {"Package", Type, 0}, + {"Package.Bugs", Field, 0}, + {"Package.Consts", Field, 0}, + {"Package.Doc", Field, 0}, + {"Package.Examples", Field, 14}, + {"Package.Filenames", Field, 0}, + {"Package.Funcs", Field, 0}, + {"Package.ImportPath", Field, 0}, + {"Package.Imports", Field, 0}, + {"Package.Name", Field, 0}, + {"Package.Notes", Field, 1}, + {"Package.Types", Field, 0}, + {"Package.Vars", Field, 0}, + {"PreserveAST", Const, 12}, + {"Synopsis", Func, 0}, + {"ToHTML", Func, 0}, + {"ToText", Func, 0}, + {"Type", Type, 0}, + {"Type.Consts", Field, 0}, + {"Type.Decl", Field, 0}, + {"Type.Doc", Field, 0}, + {"Type.Examples", Field, 14}, + {"Type.Funcs", Field, 0}, + {"Type.Methods", Field, 0}, + {"Type.Name", Field, 0}, + {"Type.Vars", Field, 0}, + {"Value", Type, 0}, + {"Value.Decl", Field, 0}, + {"Value.Doc", Field, 0}, + {"Value.Names", Field, 0}, + }, + "go/doc/comment": { + {"(*DocLink).DefaultURL", Method, 19}, + {"(*Heading).DefaultID", Method, 19}, + {"(*List).BlankBefore", Method, 19}, + {"(*List).BlankBetween", Method, 19}, + {"(*Parser).Parse", Method, 19}, + {"(*Printer).Comment", Method, 19}, + {"(*Printer).HTML", Method, 19}, + {"(*Printer).Markdown", Method, 19}, + {"(*Printer).Text", Method, 19}, + {"Block", Type, 19}, + {"Code", Type, 19}, + {"Code.Text", Field, 19}, + {"DefaultLookupPackage", Func, 19}, + {"Doc", Type, 19}, + {"Doc.Content", Field, 19}, + {"Doc.Links", Field, 19}, + {"DocLink", Type, 19}, + {"DocLink.ImportPath", Field, 19}, + {"DocLink.Name", Field, 19}, + {"DocLink.Recv", Field, 19}, + {"DocLink.Text", Field, 19}, + {"Heading", Type, 19}, + {"Heading.Text", Field, 19}, + {"Italic", Type, 19}, + {"Link", Type, 19}, + {"Link.Auto", Field, 19}, + {"Link.Text", Field, 19}, + {"Link.URL", Field, 19}, + {"LinkDef", Type, 19}, + {"LinkDef.Text", Field, 19}, + {"LinkDef.URL", Field, 19}, + {"LinkDef.Used", Field, 19}, + {"List", Type, 19}, + {"List.ForceBlankBefore", Field, 19}, + {"List.ForceBlankBetween", Field, 19}, + {"List.Items", Field, 19}, + {"ListItem", Type, 19}, + {"ListItem.Content", Field, 19}, + {"ListItem.Number", Field, 19}, + {"Paragraph", Type, 19}, + {"Paragraph.Text", Field, 19}, + {"Parser", Type, 19}, + 
{"Parser.LookupPackage", Field, 19}, + {"Parser.LookupSym", Field, 19}, + {"Parser.Words", Field, 19}, + {"Plain", Type, 19}, + {"Printer", Type, 19}, + {"Printer.DocLinkBaseURL", Field, 19}, + {"Printer.DocLinkURL", Field, 19}, + {"Printer.HeadingID", Field, 19}, + {"Printer.HeadingLevel", Field, 19}, + {"Printer.TextCodePrefix", Field, 19}, + {"Printer.TextPrefix", Field, 19}, + {"Printer.TextWidth", Field, 19}, + {"Text", Type, 19}, + }, + "go/format": { + {"Node", Func, 1}, + {"Source", Func, 1}, + }, + "go/importer": { + {"Default", Func, 5}, + {"For", Func, 5}, + {"ForCompiler", Func, 12}, + {"Lookup", Type, 5}, + }, + "go/parser": { + {"AllErrors", Const, 1}, + {"DeclarationErrors", Const, 0}, + {"ImportsOnly", Const, 0}, + {"Mode", Type, 0}, + {"PackageClauseOnly", Const, 0}, + {"ParseComments", Const, 0}, + {"ParseDir", Func, 0}, + {"ParseExpr", Func, 0}, + {"ParseExprFrom", Func, 5}, + {"ParseFile", Func, 0}, + {"SkipObjectResolution", Const, 17}, + {"SpuriousErrors", Const, 0}, + {"Trace", Const, 0}, + }, + "go/printer": { + {"(*Config).Fprint", Method, 0}, + {"CommentedNode", Type, 0}, + {"CommentedNode.Comments", Field, 0}, + {"CommentedNode.Node", Field, 0}, + {"Config", Type, 0}, + {"Config.Indent", Field, 1}, + {"Config.Mode", Field, 0}, + {"Config.Tabwidth", Field, 0}, + {"Fprint", Func, 0}, + {"Mode", Type, 0}, + {"RawFormat", Const, 0}, + {"SourcePos", Const, 0}, + {"TabIndent", Const, 0}, + {"UseSpaces", Const, 0}, + }, + "go/scanner": { + {"(*ErrorList).Add", Method, 0}, + {"(*ErrorList).RemoveMultiples", Method, 0}, + {"(*ErrorList).Reset", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(Error).Error", Method, 0}, + {"(ErrorList).Err", Method, 0}, + {"(ErrorList).Error", Method, 0}, + {"(ErrorList).Len", Method, 0}, + {"(ErrorList).Less", Method, 0}, + {"(ErrorList).Sort", Method, 0}, + {"(ErrorList).Swap", Method, 0}, + {"Error", Type, 0}, + {"Error.Msg", Field, 0}, + {"Error.Pos", Field, 0}, + {"ErrorHandler", Type, 0}, + {"ErrorList", Type, 0}, + {"Mode", Type, 0}, + {"PrintError", Func, 0}, + {"ScanComments", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.ErrorCount", Field, 0}, + }, + "go/token": { + {"(*File).AddLine", Method, 0}, + {"(*File).AddLineColumnInfo", Method, 11}, + {"(*File).AddLineInfo", Method, 0}, + {"(*File).Base", Method, 0}, + {"(*File).Line", Method, 0}, + {"(*File).LineCount", Method, 0}, + {"(*File).LineStart", Method, 12}, + {"(*File).Lines", Method, 21}, + {"(*File).MergeLine", Method, 2}, + {"(*File).Name", Method, 0}, + {"(*File).Offset", Method, 0}, + {"(*File).Pos", Method, 0}, + {"(*File).Position", Method, 0}, + {"(*File).PositionFor", Method, 4}, + {"(*File).SetLines", Method, 0}, + {"(*File).SetLinesForContent", Method, 0}, + {"(*File).Size", Method, 0}, + {"(*FileSet).AddFile", Method, 0}, + {"(*FileSet).Base", Method, 0}, + {"(*FileSet).File", Method, 0}, + {"(*FileSet).Iterate", Method, 0}, + {"(*FileSet).Position", Method, 0}, + {"(*FileSet).PositionFor", Method, 4}, + {"(*FileSet).Read", Method, 0}, + {"(*FileSet).RemoveFile", Method, 20}, + {"(*FileSet).Write", Method, 0}, + {"(*Position).IsValid", Method, 0}, + {"(Pos).IsValid", Method, 0}, + {"(Position).String", Method, 0}, + {"(Token).IsKeyword", Method, 0}, + {"(Token).IsLiteral", Method, 0}, + {"(Token).IsOperator", Method, 0}, + {"(Token).Precedence", Method, 0}, + {"(Token).String", Method, 0}, + {"ADD", Const, 0}, + {"ADD_ASSIGN", Const, 0}, + {"AND", Const, 0}, + {"AND_ASSIGN", Const, 0}, + {"AND_NOT", Const, 0}, + 
{"AND_NOT_ASSIGN", Const, 0}, + {"ARROW", Const, 0}, + {"ASSIGN", Const, 0}, + {"BREAK", Const, 0}, + {"CASE", Const, 0}, + {"CHAN", Const, 0}, + {"CHAR", Const, 0}, + {"COLON", Const, 0}, + {"COMMA", Const, 0}, + {"COMMENT", Const, 0}, + {"CONST", Const, 0}, + {"CONTINUE", Const, 0}, + {"DEC", Const, 0}, + {"DEFAULT", Const, 0}, + {"DEFER", Const, 0}, + {"DEFINE", Const, 0}, + {"ELLIPSIS", Const, 0}, + {"ELSE", Const, 0}, + {"EOF", Const, 0}, + {"EQL", Const, 0}, + {"FALLTHROUGH", Const, 0}, + {"FLOAT", Const, 0}, + {"FOR", Const, 0}, + {"FUNC", Const, 0}, + {"File", Type, 0}, + {"FileSet", Type, 0}, + {"GEQ", Const, 0}, + {"GO", Const, 0}, + {"GOTO", Const, 0}, + {"GTR", Const, 0}, + {"HighestPrec", Const, 0}, + {"IDENT", Const, 0}, + {"IF", Const, 0}, + {"ILLEGAL", Const, 0}, + {"IMAG", Const, 0}, + {"IMPORT", Const, 0}, + {"INC", Const, 0}, + {"INT", Const, 0}, + {"INTERFACE", Const, 0}, + {"IsExported", Func, 13}, + {"IsIdentifier", Func, 13}, + {"IsKeyword", Func, 13}, + {"LAND", Const, 0}, + {"LBRACE", Const, 0}, + {"LBRACK", Const, 0}, + {"LEQ", Const, 0}, + {"LOR", Const, 0}, + {"LPAREN", Const, 0}, + {"LSS", Const, 0}, + {"Lookup", Func, 0}, + {"LowestPrec", Const, 0}, + {"MAP", Const, 0}, + {"MUL", Const, 0}, + {"MUL_ASSIGN", Const, 0}, + {"NEQ", Const, 0}, + {"NOT", Const, 0}, + {"NewFileSet", Func, 0}, + {"NoPos", Const, 0}, + {"OR", Const, 0}, + {"OR_ASSIGN", Const, 0}, + {"PACKAGE", Const, 0}, + {"PERIOD", Const, 0}, + {"Pos", Type, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"QUO", Const, 0}, + {"QUO_ASSIGN", Const, 0}, + {"RANGE", Const, 0}, + {"RBRACE", Const, 0}, + {"RBRACK", Const, 0}, + {"REM", Const, 0}, + {"REM_ASSIGN", Const, 0}, + {"RETURN", Const, 0}, + {"RPAREN", Const, 0}, + {"SELECT", Const, 0}, + {"SEMICOLON", Const, 0}, + {"SHL", Const, 0}, + {"SHL_ASSIGN", Const, 0}, + {"SHR", Const, 0}, + {"SHR_ASSIGN", Const, 0}, + {"STRING", Const, 0}, + {"STRUCT", Const, 0}, + {"SUB", Const, 0}, + {"SUB_ASSIGN", Const, 0}, + {"SWITCH", Const, 0}, + {"TILDE", Const, 18}, + {"TYPE", Const, 0}, + {"Token", Type, 0}, + {"UnaryPrec", Const, 0}, + {"VAR", Const, 0}, + {"XOR", Const, 0}, + {"XOR_ASSIGN", Const, 0}, + }, + "go/types": { + {"(*Alias).Obj", Method, 22}, + {"(*Alias).String", Method, 22}, + {"(*Alias).Underlying", Method, 22}, + {"(*ArgumentError).Error", Method, 18}, + {"(*ArgumentError).Unwrap", Method, 18}, + {"(*Array).Elem", Method, 5}, + {"(*Array).Len", Method, 5}, + {"(*Array).String", Method, 5}, + {"(*Array).Underlying", Method, 5}, + {"(*Basic).Info", Method, 5}, + {"(*Basic).Kind", Method, 5}, + {"(*Basic).Name", Method, 5}, + {"(*Basic).String", Method, 5}, + {"(*Basic).Underlying", Method, 5}, + {"(*Builtin).Exported", Method, 5}, + {"(*Builtin).Id", Method, 5}, + {"(*Builtin).Name", Method, 5}, + {"(*Builtin).Parent", Method, 5}, + {"(*Builtin).Pkg", Method, 5}, + {"(*Builtin).Pos", Method, 5}, + {"(*Builtin).String", Method, 5}, + {"(*Builtin).Type", Method, 5}, + {"(*Chan).Dir", Method, 5}, + {"(*Chan).Elem", Method, 5}, + {"(*Chan).String", Method, 5}, + {"(*Chan).Underlying", Method, 5}, + {"(*Checker).Files", Method, 5}, + {"(*Config).Check", Method, 5}, + {"(*Const).Exported", Method, 5}, + {"(*Const).Id", Method, 5}, + {"(*Const).Name", Method, 5}, + {"(*Const).Parent", Method, 5}, + {"(*Const).Pkg", Method, 5}, + {"(*Const).Pos", Method, 5}, + {"(*Const).String", Method, 5}, + {"(*Const).Type", Method, 5}, + {"(*Const).Val", 
Method, 5}, + {"(*Func).Exported", Method, 5}, + {"(*Func).FullName", Method, 5}, + {"(*Func).Id", Method, 5}, + {"(*Func).Name", Method, 5}, + {"(*Func).Origin", Method, 19}, + {"(*Func).Parent", Method, 5}, + {"(*Func).Pkg", Method, 5}, + {"(*Func).Pos", Method, 5}, + {"(*Func).Scope", Method, 5}, + {"(*Func).String", Method, 5}, + {"(*Func).Type", Method, 5}, + {"(*Info).ObjectOf", Method, 5}, + {"(*Info).PkgNameOf", Method, 22}, + {"(*Info).TypeOf", Method, 5}, + {"(*Initializer).String", Method, 5}, + {"(*Interface).Complete", Method, 5}, + {"(*Interface).Embedded", Method, 5}, + {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).Empty", Method, 5}, + {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).IsComparable", Method, 18}, + {"(*Interface).IsImplicit", Method, 18}, + {"(*Interface).IsMethodSet", Method, 18}, + {"(*Interface).MarkImplicit", Method, 18}, + {"(*Interface).Method", Method, 5}, + {"(*Interface).NumEmbeddeds", Method, 5}, + {"(*Interface).NumExplicitMethods", Method, 5}, + {"(*Interface).NumMethods", Method, 5}, + {"(*Interface).String", Method, 5}, + {"(*Interface).Underlying", Method, 5}, + {"(*Label).Exported", Method, 5}, + {"(*Label).Id", Method, 5}, + {"(*Label).Name", Method, 5}, + {"(*Label).Parent", Method, 5}, + {"(*Label).Pkg", Method, 5}, + {"(*Label).Pos", Method, 5}, + {"(*Label).String", Method, 5}, + {"(*Label).Type", Method, 5}, + {"(*Map).Elem", Method, 5}, + {"(*Map).Key", Method, 5}, + {"(*Map).String", Method, 5}, + {"(*Map).Underlying", Method, 5}, + {"(*MethodSet).At", Method, 5}, + {"(*MethodSet).Len", Method, 5}, + {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).String", Method, 5}, + {"(*Named).AddMethod", Method, 5}, + {"(*Named).Method", Method, 5}, + {"(*Named).NumMethods", Method, 5}, + {"(*Named).Obj", Method, 5}, + {"(*Named).Origin", Method, 18}, + {"(*Named).SetTypeParams", Method, 18}, + {"(*Named).SetUnderlying", Method, 5}, + {"(*Named).String", Method, 5}, + {"(*Named).TypeArgs", Method, 18}, + {"(*Named).TypeParams", Method, 18}, + {"(*Named).Underlying", Method, 5}, + {"(*Nil).Exported", Method, 5}, + {"(*Nil).Id", Method, 5}, + {"(*Nil).Name", Method, 5}, + {"(*Nil).Parent", Method, 5}, + {"(*Nil).Pkg", Method, 5}, + {"(*Nil).Pos", Method, 5}, + {"(*Nil).String", Method, 5}, + {"(*Nil).Type", Method, 5}, + {"(*Package).Complete", Method, 5}, + {"(*Package).GoVersion", Method, 21}, + {"(*Package).Imports", Method, 5}, + {"(*Package).MarkComplete", Method, 5}, + {"(*Package).Name", Method, 5}, + {"(*Package).Path", Method, 5}, + {"(*Package).Scope", Method, 5}, + {"(*Package).SetImports", Method, 5}, + {"(*Package).SetName", Method, 6}, + {"(*Package).String", Method, 5}, + {"(*PkgName).Exported", Method, 5}, + {"(*PkgName).Id", Method, 5}, + {"(*PkgName).Imported", Method, 5}, + {"(*PkgName).Name", Method, 5}, + {"(*PkgName).Parent", Method, 5}, + {"(*PkgName).Pkg", Method, 5}, + {"(*PkgName).Pos", Method, 5}, + {"(*PkgName).String", Method, 5}, + {"(*PkgName).Type", Method, 5}, + {"(*Pointer).Elem", Method, 5}, + {"(*Pointer).String", Method, 5}, + {"(*Pointer).Underlying", Method, 5}, + {"(*Scope).Child", Method, 5}, + {"(*Scope).Contains", Method, 5}, + {"(*Scope).End", Method, 5}, + {"(*Scope).Innermost", Method, 5}, + {"(*Scope).Insert", Method, 5}, + {"(*Scope).Len", Method, 5}, + {"(*Scope).Lookup", Method, 5}, + {"(*Scope).LookupParent", Method, 5}, + {"(*Scope).Names", Method, 5}, + {"(*Scope).NumChildren", Method, 5}, + {"(*Scope).Parent", Method, 5}, + {"(*Scope).Pos", Method, 5}, + 
{"(*Scope).String", Method, 5}, + {"(*Scope).WriteTo", Method, 5}, + {"(*Selection).Index", Method, 5}, + {"(*Selection).Indirect", Method, 5}, + {"(*Selection).Kind", Method, 5}, + {"(*Selection).Obj", Method, 5}, + {"(*Selection).Recv", Method, 5}, + {"(*Selection).String", Method, 5}, + {"(*Selection).Type", Method, 5}, + {"(*Signature).Params", Method, 5}, + {"(*Signature).Recv", Method, 5}, + {"(*Signature).RecvTypeParams", Method, 18}, + {"(*Signature).Results", Method, 5}, + {"(*Signature).String", Method, 5}, + {"(*Signature).TypeParams", Method, 18}, + {"(*Signature).Underlying", Method, 5}, + {"(*Signature).Variadic", Method, 5}, + {"(*Slice).Elem", Method, 5}, + {"(*Slice).String", Method, 5}, + {"(*Slice).Underlying", Method, 5}, + {"(*StdSizes).Alignof", Method, 5}, + {"(*StdSizes).Offsetsof", Method, 5}, + {"(*StdSizes).Sizeof", Method, 5}, + {"(*Struct).Field", Method, 5}, + {"(*Struct).NumFields", Method, 5}, + {"(*Struct).String", Method, 5}, + {"(*Struct).Tag", Method, 5}, + {"(*Struct).Underlying", Method, 5}, + {"(*Term).String", Method, 18}, + {"(*Term).Tilde", Method, 18}, + {"(*Term).Type", Method, 18}, + {"(*Tuple).At", Method, 5}, + {"(*Tuple).Len", Method, 5}, + {"(*Tuple).String", Method, 5}, + {"(*Tuple).Underlying", Method, 5}, + {"(*TypeList).At", Method, 18}, + {"(*TypeList).Len", Method, 18}, + {"(*TypeName).Exported", Method, 5}, + {"(*TypeName).Id", Method, 5}, + {"(*TypeName).IsAlias", Method, 9}, + {"(*TypeName).Name", Method, 5}, + {"(*TypeName).Parent", Method, 5}, + {"(*TypeName).Pkg", Method, 5}, + {"(*TypeName).Pos", Method, 5}, + {"(*TypeName).String", Method, 5}, + {"(*TypeName).Type", Method, 5}, + {"(*TypeParam).Constraint", Method, 18}, + {"(*TypeParam).Index", Method, 18}, + {"(*TypeParam).Obj", Method, 18}, + {"(*TypeParam).SetConstraint", Method, 18}, + {"(*TypeParam).String", Method, 18}, + {"(*TypeParam).Underlying", Method, 18}, + {"(*TypeParamList).At", Method, 18}, + {"(*TypeParamList).Len", Method, 18}, + {"(*Union).Len", Method, 18}, + {"(*Union).String", Method, 18}, + {"(*Union).Term", Method, 18}, + {"(*Union).Underlying", Method, 18}, + {"(*Var).Anonymous", Method, 5}, + {"(*Var).Embedded", Method, 11}, + {"(*Var).Exported", Method, 5}, + {"(*Var).Id", Method, 5}, + {"(*Var).IsField", Method, 5}, + {"(*Var).Name", Method, 5}, + {"(*Var).Origin", Method, 19}, + {"(*Var).Parent", Method, 5}, + {"(*Var).Pkg", Method, 5}, + {"(*Var).Pos", Method, 5}, + {"(*Var).String", Method, 5}, + {"(*Var).Type", Method, 5}, + {"(Checker).ObjectOf", Method, 5}, + {"(Checker).PkgNameOf", Method, 22}, + {"(Checker).TypeOf", Method, 5}, + {"(Error).Error", Method, 5}, + {"(TypeAndValue).Addressable", Method, 5}, + {"(TypeAndValue).Assignable", Method, 5}, + {"(TypeAndValue).HasOk", Method, 5}, + {"(TypeAndValue).IsBuiltin", Method, 5}, + {"(TypeAndValue).IsNil", Method, 5}, + {"(TypeAndValue).IsType", Method, 5}, + {"(TypeAndValue).IsValue", Method, 5}, + {"(TypeAndValue).IsVoid", Method, 5}, + {"Alias", Type, 22}, + {"ArgumentError", Type, 18}, + {"ArgumentError.Err", Field, 18}, + {"ArgumentError.Index", Field, 18}, + {"Array", Type, 5}, + {"AssertableTo", Func, 5}, + {"AssignableTo", Func, 5}, + {"Basic", Type, 5}, + {"BasicInfo", Type, 5}, + {"BasicKind", Type, 5}, + {"Bool", Const, 5}, + {"Builtin", Type, 5}, + {"Byte", Const, 5}, + {"Chan", Type, 5}, + {"ChanDir", Type, 5}, + {"CheckExpr", Func, 13}, + {"Checker", Type, 5}, + {"Checker.Info", Field, 5}, + {"Comparable", Func, 5}, + {"Complex128", Const, 5}, + {"Complex64", Const, 5}, + 
{"Config", Type, 5}, + {"Config.Context", Field, 18}, + {"Config.DisableUnusedImportCheck", Field, 5}, + {"Config.Error", Field, 5}, + {"Config.FakeImportC", Field, 5}, + {"Config.GoVersion", Field, 18}, + {"Config.IgnoreFuncBodies", Field, 5}, + {"Config.Importer", Field, 5}, + {"Config.Sizes", Field, 5}, + {"Const", Type, 5}, + {"Context", Type, 18}, + {"ConvertibleTo", Func, 5}, + {"DefPredeclaredTestFuncs", Func, 5}, + {"Default", Func, 8}, + {"Error", Type, 5}, + {"Error.Fset", Field, 5}, + {"Error.Msg", Field, 5}, + {"Error.Pos", Field, 5}, + {"Error.Soft", Field, 5}, + {"Eval", Func, 5}, + {"ExprString", Func, 5}, + {"FieldVal", Const, 5}, + {"Float32", Const, 5}, + {"Float64", Const, 5}, + {"Func", Type, 5}, + {"Id", Func, 5}, + {"Identical", Func, 5}, + {"IdenticalIgnoreTags", Func, 8}, + {"Implements", Func, 5}, + {"ImportMode", Type, 6}, + {"Importer", Type, 5}, + {"ImporterFrom", Type, 6}, + {"Info", Type, 5}, + {"Info.Defs", Field, 5}, + {"Info.FileVersions", Field, 22}, + {"Info.Implicits", Field, 5}, + {"Info.InitOrder", Field, 5}, + {"Info.Instances", Field, 18}, + {"Info.Scopes", Field, 5}, + {"Info.Selections", Field, 5}, + {"Info.Types", Field, 5}, + {"Info.Uses", Field, 5}, + {"Initializer", Type, 5}, + {"Initializer.Lhs", Field, 5}, + {"Initializer.Rhs", Field, 5}, + {"Instance", Type, 18}, + {"Instance.Type", Field, 18}, + {"Instance.TypeArgs", Field, 18}, + {"Instantiate", Func, 18}, + {"Int", Const, 5}, + {"Int16", Const, 5}, + {"Int32", Const, 5}, + {"Int64", Const, 5}, + {"Int8", Const, 5}, + {"Interface", Type, 5}, + {"Invalid", Const, 5}, + {"IsBoolean", Const, 5}, + {"IsComplex", Const, 5}, + {"IsConstType", Const, 5}, + {"IsFloat", Const, 5}, + {"IsInteger", Const, 5}, + {"IsInterface", Func, 5}, + {"IsNumeric", Const, 5}, + {"IsOrdered", Const, 5}, + {"IsString", Const, 5}, + {"IsUnsigned", Const, 5}, + {"IsUntyped", Const, 5}, + {"Label", Type, 5}, + {"LookupFieldOrMethod", Func, 5}, + {"Map", Type, 5}, + {"MethodExpr", Const, 5}, + {"MethodSet", Type, 5}, + {"MethodVal", Const, 5}, + {"MissingMethod", Func, 5}, + {"Named", Type, 5}, + {"NewAlias", Func, 22}, + {"NewArray", Func, 5}, + {"NewChan", Func, 5}, + {"NewChecker", Func, 5}, + {"NewConst", Func, 5}, + {"NewContext", Func, 18}, + {"NewField", Func, 5}, + {"NewFunc", Func, 5}, + {"NewInterface", Func, 5}, + {"NewInterfaceType", Func, 11}, + {"NewLabel", Func, 5}, + {"NewMap", Func, 5}, + {"NewMethodSet", Func, 5}, + {"NewNamed", Func, 5}, + {"NewPackage", Func, 5}, + {"NewParam", Func, 5}, + {"NewPkgName", Func, 5}, + {"NewPointer", Func, 5}, + {"NewScope", Func, 5}, + {"NewSignature", Func, 5}, + {"NewSignatureType", Func, 18}, + {"NewSlice", Func, 5}, + {"NewStruct", Func, 5}, + {"NewTerm", Func, 18}, + {"NewTuple", Func, 5}, + {"NewTypeName", Func, 5}, + {"NewTypeParam", Func, 18}, + {"NewUnion", Func, 18}, + {"NewVar", Func, 5}, + {"Nil", Type, 5}, + {"Object", Type, 5}, + {"ObjectString", Func, 5}, + {"Package", Type, 5}, + {"PkgName", Type, 5}, + {"Pointer", Type, 5}, + {"Qualifier", Type, 5}, + {"RecvOnly", Const, 5}, + {"RelativeTo", Func, 5}, + {"Rune", Const, 5}, + {"Satisfies", Func, 20}, + {"Scope", Type, 5}, + {"Selection", Type, 5}, + {"SelectionKind", Type, 5}, + {"SelectionString", Func, 5}, + {"SendOnly", Const, 5}, + {"SendRecv", Const, 5}, + {"Signature", Type, 5}, + {"Sizes", Type, 5}, + {"SizesFor", Func, 9}, + {"Slice", Type, 5}, + {"StdSizes", Type, 5}, + {"StdSizes.MaxAlign", Field, 5}, + {"StdSizes.WordSize", Field, 5}, + {"String", Const, 5}, + {"Struct", Type, 5}, + {"Term", 
Type, 18}, + {"Tuple", Type, 5}, + {"Typ", Var, 5}, + {"Type", Type, 5}, + {"TypeAndValue", Type, 5}, + {"TypeAndValue.Type", Field, 5}, + {"TypeAndValue.Value", Field, 5}, + {"TypeList", Type, 18}, + {"TypeName", Type, 5}, + {"TypeParam", Type, 18}, + {"TypeParamList", Type, 18}, + {"TypeString", Func, 5}, + {"Uint", Const, 5}, + {"Uint16", Const, 5}, + {"Uint32", Const, 5}, + {"Uint64", Const, 5}, + {"Uint8", Const, 5}, + {"Uintptr", Const, 5}, + {"Unalias", Func, 22}, + {"Union", Type, 18}, + {"Universe", Var, 5}, + {"Unsafe", Var, 5}, + {"UnsafePointer", Const, 5}, + {"UntypedBool", Const, 5}, + {"UntypedComplex", Const, 5}, + {"UntypedFloat", Const, 5}, + {"UntypedInt", Const, 5}, + {"UntypedNil", Const, 5}, + {"UntypedRune", Const, 5}, + {"UntypedString", Const, 5}, + {"Var", Type, 5}, + {"WriteExpr", Func, 5}, + {"WriteSignature", Func, 5}, + {"WriteType", Func, 5}, + }, + "go/version": { + {"Compare", Func, 22}, + {"IsValid", Func, 22}, + {"Lang", Func, 22}, + }, + "hash": { + {"Hash", Type, 0}, + {"Hash32", Type, 0}, + {"Hash64", Type, 0}, + }, + "hash/adler32": { + {"Checksum", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + }, + "hash/crc32": { + {"Castagnoli", Const, 0}, + {"Checksum", Func, 0}, + {"ChecksumIEEE", Func, 0}, + {"IEEE", Const, 0}, + {"IEEETable", Var, 0}, + {"Koopman", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"NewIEEE", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/crc64": { + {"Checksum", Func, 0}, + {"ECMA", Const, 0}, + {"ISO", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/fnv": { + {"New128", Func, 9}, + {"New128a", Func, 9}, + {"New32", Func, 0}, + {"New32a", Func, 0}, + {"New64", Func, 0}, + {"New64a", Func, 0}, + }, + "hash/maphash": { + {"(*Hash).BlockSize", Method, 14}, + {"(*Hash).Reset", Method, 14}, + {"(*Hash).Seed", Method, 14}, + {"(*Hash).SetSeed", Method, 14}, + {"(*Hash).Size", Method, 14}, + {"(*Hash).Sum", Method, 14}, + {"(*Hash).Sum64", Method, 14}, + {"(*Hash).Write", Method, 14}, + {"(*Hash).WriteByte", Method, 14}, + {"(*Hash).WriteString", Method, 14}, + {"Bytes", Func, 19}, + {"Hash", Type, 14}, + {"MakeSeed", Func, 14}, + {"Seed", Type, 14}, + {"String", Func, 19}, + }, + "html": { + {"EscapeString", Func, 0}, + {"UnescapeString", Func, 0}, + }, + "html/template": { + {"(*Error).Error", Method, 0}, + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 6}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"CSS", Type, 0}, + {"ErrAmbigContext", Const, 0}, + {"ErrBadHTML", Const, 0}, + {"ErrBranchEnd", Const, 0}, + {"ErrEndContext", Const, 0}, + {"ErrJSTemplate", Const, 21}, + {"ErrNoSuchTemplate", Const, 0}, + {"ErrOutputContext", Const, 0}, + {"ErrPartialCharset", Const, 0}, + {"ErrPartialEscape", Const, 0}, + {"ErrPredefinedEscaper", Const, 9}, + {"ErrRangeLoopReentry", Const, 0}, + {"ErrSlashAmbig", Const, 0}, + {"Error", Type, 0}, + {"Error.Description", Field, 0}, + 
{"Error.ErrorCode", Field, 0}, + {"Error.Line", Field, 0}, + {"Error.Name", Field, 0}, + {"Error.Node", Field, 4}, + {"ErrorCode", Type, 0}, + {"FuncMap", Type, 0}, + {"HTML", Type, 0}, + {"HTMLAttr", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JS", Type, 0}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"JSStr", Type, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"OK", Const, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Srcset", Type, 10}, + {"Template", Type, 0}, + {"Template.Tree", Field, 2}, + {"URL", Type, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "image": { + {"(*Alpha).AlphaAt", Method, 4}, + {"(*Alpha).At", Method, 0}, + {"(*Alpha).Bounds", Method, 0}, + {"(*Alpha).ColorModel", Method, 0}, + {"(*Alpha).Opaque", Method, 0}, + {"(*Alpha).PixOffset", Method, 0}, + {"(*Alpha).RGBA64At", Method, 17}, + {"(*Alpha).Set", Method, 0}, + {"(*Alpha).SetAlpha", Method, 0}, + {"(*Alpha).SetRGBA64", Method, 17}, + {"(*Alpha).SubImage", Method, 0}, + {"(*Alpha16).Alpha16At", Method, 4}, + {"(*Alpha16).At", Method, 0}, + {"(*Alpha16).Bounds", Method, 0}, + {"(*Alpha16).ColorModel", Method, 0}, + {"(*Alpha16).Opaque", Method, 0}, + {"(*Alpha16).PixOffset", Method, 0}, + {"(*Alpha16).RGBA64At", Method, 17}, + {"(*Alpha16).Set", Method, 0}, + {"(*Alpha16).SetAlpha16", Method, 0}, + {"(*Alpha16).SetRGBA64", Method, 17}, + {"(*Alpha16).SubImage", Method, 0}, + {"(*CMYK).At", Method, 5}, + {"(*CMYK).Bounds", Method, 5}, + {"(*CMYK).CMYKAt", Method, 5}, + {"(*CMYK).ColorModel", Method, 5}, + {"(*CMYK).Opaque", Method, 5}, + {"(*CMYK).PixOffset", Method, 5}, + {"(*CMYK).RGBA64At", Method, 17}, + {"(*CMYK).Set", Method, 5}, + {"(*CMYK).SetCMYK", Method, 5}, + {"(*CMYK).SetRGBA64", Method, 17}, + {"(*CMYK).SubImage", Method, 5}, + {"(*Gray).At", Method, 0}, + {"(*Gray).Bounds", Method, 0}, + {"(*Gray).ColorModel", Method, 0}, + {"(*Gray).GrayAt", Method, 4}, + {"(*Gray).Opaque", Method, 0}, + {"(*Gray).PixOffset", Method, 0}, + {"(*Gray).RGBA64At", Method, 17}, + {"(*Gray).Set", Method, 0}, + {"(*Gray).SetGray", Method, 0}, + {"(*Gray).SetRGBA64", Method, 17}, + {"(*Gray).SubImage", Method, 0}, + {"(*Gray16).At", Method, 0}, + {"(*Gray16).Bounds", Method, 0}, + {"(*Gray16).ColorModel", Method, 0}, + {"(*Gray16).Gray16At", Method, 4}, + {"(*Gray16).Opaque", Method, 0}, + {"(*Gray16).PixOffset", Method, 0}, + {"(*Gray16).RGBA64At", Method, 17}, + {"(*Gray16).Set", Method, 0}, + {"(*Gray16).SetGray16", Method, 0}, + {"(*Gray16).SetRGBA64", Method, 17}, + {"(*Gray16).SubImage", Method, 0}, + {"(*NRGBA).At", Method, 0}, + {"(*NRGBA).Bounds", Method, 0}, + {"(*NRGBA).ColorModel", Method, 0}, + {"(*NRGBA).NRGBAAt", Method, 4}, + {"(*NRGBA).Opaque", Method, 0}, + {"(*NRGBA).PixOffset", Method, 0}, + {"(*NRGBA).RGBA64At", Method, 17}, + {"(*NRGBA).Set", Method, 0}, + {"(*NRGBA).SetNRGBA", Method, 0}, + {"(*NRGBA).SetRGBA64", Method, 17}, + {"(*NRGBA).SubImage", Method, 0}, + {"(*NRGBA64).At", Method, 0}, + {"(*NRGBA64).Bounds", Method, 0}, + {"(*NRGBA64).ColorModel", Method, 0}, + {"(*NRGBA64).NRGBA64At", Method, 4}, + {"(*NRGBA64).Opaque", Method, 0}, + {"(*NRGBA64).PixOffset", Method, 0}, + {"(*NRGBA64).RGBA64At", Method, 17}, + {"(*NRGBA64).Set", Method, 0}, + {"(*NRGBA64).SetNRGBA64", Method, 0}, + {"(*NRGBA64).SetRGBA64", Method, 17}, + {"(*NRGBA64).SubImage", Method, 0}, + {"(*NYCbCrA).AOffset", Method, 6}, + {"(*NYCbCrA).At", Method, 6}, + 
{"(*NYCbCrA).Bounds", Method, 6}, + {"(*NYCbCrA).COffset", Method, 6}, + {"(*NYCbCrA).ColorModel", Method, 6}, + {"(*NYCbCrA).NYCbCrAAt", Method, 6}, + {"(*NYCbCrA).Opaque", Method, 6}, + {"(*NYCbCrA).RGBA64At", Method, 17}, + {"(*NYCbCrA).SubImage", Method, 6}, + {"(*NYCbCrA).YCbCrAt", Method, 6}, + {"(*NYCbCrA).YOffset", Method, 6}, + {"(*Paletted).At", Method, 0}, + {"(*Paletted).Bounds", Method, 0}, + {"(*Paletted).ColorIndexAt", Method, 0}, + {"(*Paletted).ColorModel", Method, 0}, + {"(*Paletted).Opaque", Method, 0}, + {"(*Paletted).PixOffset", Method, 0}, + {"(*Paletted).RGBA64At", Method, 17}, + {"(*Paletted).Set", Method, 0}, + {"(*Paletted).SetColorIndex", Method, 0}, + {"(*Paletted).SetRGBA64", Method, 17}, + {"(*Paletted).SubImage", Method, 0}, + {"(*RGBA).At", Method, 0}, + {"(*RGBA).Bounds", Method, 0}, + {"(*RGBA).ColorModel", Method, 0}, + {"(*RGBA).Opaque", Method, 0}, + {"(*RGBA).PixOffset", Method, 0}, + {"(*RGBA).RGBA64At", Method, 17}, + {"(*RGBA).RGBAAt", Method, 4}, + {"(*RGBA).Set", Method, 0}, + {"(*RGBA).SetRGBA", Method, 0}, + {"(*RGBA).SetRGBA64", Method, 17}, + {"(*RGBA).SubImage", Method, 0}, + {"(*RGBA64).At", Method, 0}, + {"(*RGBA64).Bounds", Method, 0}, + {"(*RGBA64).ColorModel", Method, 0}, + {"(*RGBA64).Opaque", Method, 0}, + {"(*RGBA64).PixOffset", Method, 0}, + {"(*RGBA64).RGBA64At", Method, 4}, + {"(*RGBA64).Set", Method, 0}, + {"(*RGBA64).SetRGBA64", Method, 0}, + {"(*RGBA64).SubImage", Method, 0}, + {"(*Uniform).At", Method, 0}, + {"(*Uniform).Bounds", Method, 0}, + {"(*Uniform).ColorModel", Method, 0}, + {"(*Uniform).Convert", Method, 0}, + {"(*Uniform).Opaque", Method, 0}, + {"(*Uniform).RGBA", Method, 0}, + {"(*Uniform).RGBA64At", Method, 17}, + {"(*YCbCr).At", Method, 0}, + {"(*YCbCr).Bounds", Method, 0}, + {"(*YCbCr).COffset", Method, 0}, + {"(*YCbCr).ColorModel", Method, 0}, + {"(*YCbCr).Opaque", Method, 0}, + {"(*YCbCr).RGBA64At", Method, 17}, + {"(*YCbCr).SubImage", Method, 0}, + {"(*YCbCr).YCbCrAt", Method, 4}, + {"(*YCbCr).YOffset", Method, 0}, + {"(Point).Add", Method, 0}, + {"(Point).Div", Method, 0}, + {"(Point).Eq", Method, 0}, + {"(Point).In", Method, 0}, + {"(Point).Mod", Method, 0}, + {"(Point).Mul", Method, 0}, + {"(Point).String", Method, 0}, + {"(Point).Sub", Method, 0}, + {"(Rectangle).Add", Method, 0}, + {"(Rectangle).At", Method, 5}, + {"(Rectangle).Bounds", Method, 5}, + {"(Rectangle).Canon", Method, 0}, + {"(Rectangle).ColorModel", Method, 5}, + {"(Rectangle).Dx", Method, 0}, + {"(Rectangle).Dy", Method, 0}, + {"(Rectangle).Empty", Method, 0}, + {"(Rectangle).Eq", Method, 0}, + {"(Rectangle).In", Method, 0}, + {"(Rectangle).Inset", Method, 0}, + {"(Rectangle).Intersect", Method, 0}, + {"(Rectangle).Overlaps", Method, 0}, + {"(Rectangle).RGBA64At", Method, 17}, + {"(Rectangle).Size", Method, 0}, + {"(Rectangle).String", Method, 0}, + {"(Rectangle).Sub", Method, 0}, + {"(Rectangle).Union", Method, 0}, + {"(YCbCrSubsampleRatio).String", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.Pix", Field, 0}, + {"Alpha.Rect", Field, 0}, + {"Alpha.Stride", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.Pix", Field, 0}, + {"Alpha16.Rect", Field, 0}, + {"Alpha16.Stride", Field, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.Pix", Field, 5}, + {"CMYK.Rect", Field, 5}, + {"CMYK.Stride", Field, 5}, + {"Config", Type, 0}, + {"Config.ColorModel", Field, 0}, + {"Config.Height", Field, 0}, + {"Config.Width", Field, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"ErrFormat", Var, 0}, + {"Gray", Type, 0}, + {"Gray.Pix", Field, 0}, + 
{"Gray.Rect", Field, 0}, + {"Gray.Stride", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Pix", Field, 0}, + {"Gray16.Rect", Field, 0}, + {"Gray16.Stride", Field, 0}, + {"Image", Type, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.Pix", Field, 0}, + {"NRGBA.Rect", Field, 0}, + {"NRGBA.Stride", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.Pix", Field, 0}, + {"NRGBA64.Rect", Field, 0}, + {"NRGBA64.Stride", Field, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.AStride", Field, 6}, + {"NYCbCrA.YCbCr", Field, 6}, + {"NewAlpha", Func, 0}, + {"NewAlpha16", Func, 0}, + {"NewCMYK", Func, 5}, + {"NewGray", Func, 0}, + {"NewGray16", Func, 0}, + {"NewNRGBA", Func, 0}, + {"NewNRGBA64", Func, 0}, + {"NewNYCbCrA", Func, 6}, + {"NewPaletted", Func, 0}, + {"NewRGBA", Func, 0}, + {"NewRGBA64", Func, 0}, + {"NewUniform", Func, 0}, + {"NewYCbCr", Func, 0}, + {"Opaque", Var, 0}, + {"Paletted", Type, 0}, + {"Paletted.Palette", Field, 0}, + {"Paletted.Pix", Field, 0}, + {"Paletted.Rect", Field, 0}, + {"Paletted.Stride", Field, 0}, + {"PalettedImage", Type, 0}, + {"Point", Type, 0}, + {"Point.X", Field, 0}, + {"Point.Y", Field, 0}, + {"Pt", Func, 0}, + {"RGBA", Type, 0}, + {"RGBA.Pix", Field, 0}, + {"RGBA.Rect", Field, 0}, + {"RGBA.Stride", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.Pix", Field, 0}, + {"RGBA64.Rect", Field, 0}, + {"RGBA64.Stride", Field, 0}, + {"RGBA64Image", Type, 17}, + {"Rect", Func, 0}, + {"Rectangle", Type, 0}, + {"Rectangle.Max", Field, 0}, + {"Rectangle.Min", Field, 0}, + {"RegisterFormat", Func, 0}, + {"Transparent", Var, 0}, + {"Uniform", Type, 0}, + {"Uniform.C", Field, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.CStride", Field, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Rect", Field, 0}, + {"YCbCr.SubsampleRatio", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCr.YStride", Field, 0}, + {"YCbCrSubsampleRatio", Type, 0}, + {"YCbCrSubsampleRatio410", Const, 5}, + {"YCbCrSubsampleRatio411", Const, 5}, + {"YCbCrSubsampleRatio420", Const, 0}, + {"YCbCrSubsampleRatio422", Const, 0}, + {"YCbCrSubsampleRatio440", Const, 1}, + {"YCbCrSubsampleRatio444", Const, 0}, + {"ZP", Var, 0}, + {"ZR", Var, 0}, + }, + "image/color": { + {"(Alpha).RGBA", Method, 0}, + {"(Alpha16).RGBA", Method, 0}, + {"(CMYK).RGBA", Method, 5}, + {"(Gray).RGBA", Method, 0}, + {"(Gray16).RGBA", Method, 0}, + {"(NRGBA).RGBA", Method, 0}, + {"(NRGBA64).RGBA", Method, 0}, + {"(NYCbCrA).RGBA", Method, 6}, + {"(Palette).Convert", Method, 0}, + {"(Palette).Index", Method, 0}, + {"(RGBA).RGBA", Method, 0}, + {"(RGBA64).RGBA", Method, 0}, + {"(YCbCr).RGBA", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.A", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.A", Field, 0}, + {"Alpha16Model", Var, 0}, + {"AlphaModel", Var, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.C", Field, 5}, + {"CMYK.K", Field, 5}, + {"CMYK.M", Field, 5}, + {"CMYK.Y", Field, 5}, + {"CMYKModel", Var, 5}, + {"CMYKToRGB", Func, 5}, + {"Color", Type, 0}, + {"Gray", Type, 0}, + {"Gray.Y", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Y", Field, 0}, + {"Gray16Model", Var, 0}, + {"GrayModel", Var, 0}, + {"Model", Type, 0}, + {"ModelFunc", Func, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.A", Field, 0}, + {"NRGBA.B", Field, 0}, + {"NRGBA.G", Field, 0}, + {"NRGBA.R", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.A", Field, 0}, + {"NRGBA64.B", Field, 0}, + {"NRGBA64.G", Field, 0}, + {"NRGBA64.R", Field, 0}, + {"NRGBA64Model", Var, 0}, + {"NRGBAModel", Var, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.YCbCr", 
Field, 6}, + {"NYCbCrAModel", Var, 6}, + {"Opaque", Var, 0}, + {"Palette", Type, 0}, + {"RGBA", Type, 0}, + {"RGBA.A", Field, 0}, + {"RGBA.B", Field, 0}, + {"RGBA.G", Field, 0}, + {"RGBA.R", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.A", Field, 0}, + {"RGBA64.B", Field, 0}, + {"RGBA64.G", Field, 0}, + {"RGBA64.R", Field, 0}, + {"RGBA64Model", Var, 0}, + {"RGBAModel", Var, 0}, + {"RGBToCMYK", Func, 5}, + {"RGBToYCbCr", Func, 0}, + {"Transparent", Var, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCrModel", Var, 0}, + {"YCbCrToRGB", Func, 0}, + }, + "image/color/palette": { + {"Plan9", Var, 2}, + {"WebSafe", Var, 2}, + }, + "image/draw": { + {"(Op).Draw", Method, 2}, + {"Draw", Func, 0}, + {"DrawMask", Func, 0}, + {"Drawer", Type, 2}, + {"FloydSteinberg", Var, 2}, + {"Image", Type, 0}, + {"Op", Type, 0}, + {"Over", Const, 0}, + {"Quantizer", Type, 2}, + {"RGBA64Image", Type, 17}, + {"Src", Const, 0}, + }, + "image/gif": { + {"Decode", Func, 0}, + {"DecodeAll", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DisposalBackground", Const, 5}, + {"DisposalNone", Const, 5}, + {"DisposalPrevious", Const, 5}, + {"Encode", Func, 2}, + {"EncodeAll", Func, 2}, + {"GIF", Type, 0}, + {"GIF.BackgroundIndex", Field, 5}, + {"GIF.Config", Field, 5}, + {"GIF.Delay", Field, 0}, + {"GIF.Disposal", Field, 5}, + {"GIF.Image", Field, 0}, + {"GIF.LoopCount", Field, 0}, + {"Options", Type, 2}, + {"Options.Drawer", Field, 2}, + {"Options.NumColors", Field, 2}, + {"Options.Quantizer", Field, 2}, + }, + "image/jpeg": { + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultQuality", Const, 0}, + {"Encode", Func, 0}, + {"FormatError", Type, 0}, + {"Options", Type, 0}, + {"Options.Quality", Field, 0}, + {"Reader", Type, 0}, + {"UnsupportedError", Type, 0}, + }, + "image/png": { + {"(*Encoder).Encode", Method, 4}, + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"BestCompression", Const, 4}, + {"BestSpeed", Const, 4}, + {"CompressionLevel", Type, 4}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultCompression", Const, 4}, + {"Encode", Func, 0}, + {"Encoder", Type, 4}, + {"Encoder.BufferPool", Field, 9}, + {"Encoder.CompressionLevel", Field, 4}, + {"EncoderBuffer", Type, 9}, + {"EncoderBufferPool", Type, 9}, + {"FormatError", Type, 0}, + {"NoCompression", Const, 4}, + {"UnsupportedError", Type, 0}, + }, + "index/suffixarray": { + {"(*Index).Bytes", Method, 0}, + {"(*Index).FindAllIndex", Method, 0}, + {"(*Index).Lookup", Method, 0}, + {"(*Index).Read", Method, 0}, + {"(*Index).Write", Method, 0}, + {"Index", Type, 0}, + {"New", Func, 0}, + }, + "io": { + {"(*LimitedReader).Read", Method, 0}, + {"(*OffsetWriter).Seek", Method, 20}, + {"(*OffsetWriter).Write", Method, 20}, + {"(*OffsetWriter).WriteAt", Method, 20}, + {"(*PipeReader).Close", Method, 0}, + {"(*PipeReader).CloseWithError", Method, 0}, + {"(*PipeReader).Read", Method, 0}, + {"(*PipeWriter).Close", Method, 0}, + {"(*PipeWriter).CloseWithError", Method, 0}, + {"(*PipeWriter).Write", Method, 0}, + {"(*SectionReader).Outer", Method, 22}, + {"(*SectionReader).Read", Method, 0}, + {"(*SectionReader).ReadAt", Method, 0}, + {"(*SectionReader).Seek", Method, 0}, + {"(*SectionReader).Size", Method, 0}, + {"ByteReader", Type, 0}, + {"ByteScanner", Type, 0}, + {"ByteWriter", Type, 1}, + {"Closer", Type, 0}, + {"Copy", Func, 0}, + {"CopyBuffer", Func, 5}, + 
{"CopyN", Func, 0}, + {"Discard", Var, 16}, + {"EOF", Var, 0}, + {"ErrClosedPipe", Var, 0}, + {"ErrNoProgress", Var, 1}, + {"ErrShortBuffer", Var, 0}, + {"ErrShortWrite", Var, 0}, + {"ErrUnexpectedEOF", Var, 0}, + {"LimitReader", Func, 0}, + {"LimitedReader", Type, 0}, + {"LimitedReader.N", Field, 0}, + {"LimitedReader.R", Field, 0}, + {"MultiReader", Func, 0}, + {"MultiWriter", Func, 0}, + {"NewOffsetWriter", Func, 20}, + {"NewSectionReader", Func, 0}, + {"NopCloser", Func, 16}, + {"OffsetWriter", Type, 20}, + {"Pipe", Func, 0}, + {"PipeReader", Type, 0}, + {"PipeWriter", Type, 0}, + {"ReadAll", Func, 16}, + {"ReadAtLeast", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadFull", Func, 0}, + {"ReadSeekCloser", Type, 16}, + {"ReadSeeker", Type, 0}, + {"ReadWriteCloser", Type, 0}, + {"ReadWriteSeeker", Type, 0}, + {"ReadWriter", Type, 0}, + {"Reader", Type, 0}, + {"ReaderAt", Type, 0}, + {"ReaderFrom", Type, 0}, + {"RuneReader", Type, 0}, + {"RuneScanner", Type, 0}, + {"SectionReader", Type, 0}, + {"SeekCurrent", Const, 7}, + {"SeekEnd", Const, 7}, + {"SeekStart", Const, 7}, + {"Seeker", Type, 0}, + {"StringWriter", Type, 12}, + {"TeeReader", Func, 0}, + {"WriteCloser", Type, 0}, + {"WriteSeeker", Type, 0}, + {"WriteString", Func, 0}, + {"Writer", Type, 0}, + {"WriterAt", Type, 0}, + {"WriterTo", Type, 0}, + }, + "io/fs": { + {"(*PathError).Error", Method, 16}, + {"(*PathError).Timeout", Method, 16}, + {"(*PathError).Unwrap", Method, 16}, + {"(FileMode).IsDir", Method, 16}, + {"(FileMode).IsRegular", Method, 16}, + {"(FileMode).Perm", Method, 16}, + {"(FileMode).String", Method, 16}, + {"(FileMode).Type", Method, 16}, + {"DirEntry", Type, 16}, + {"ErrClosed", Var, 16}, + {"ErrExist", Var, 16}, + {"ErrInvalid", Var, 16}, + {"ErrNotExist", Var, 16}, + {"ErrPermission", Var, 16}, + {"FS", Type, 16}, + {"File", Type, 16}, + {"FileInfo", Type, 16}, + {"FileInfoToDirEntry", Func, 17}, + {"FileMode", Type, 16}, + {"FormatDirEntry", Func, 21}, + {"FormatFileInfo", Func, 21}, + {"Glob", Func, 16}, + {"GlobFS", Type, 16}, + {"ModeAppend", Const, 16}, + {"ModeCharDevice", Const, 16}, + {"ModeDevice", Const, 16}, + {"ModeDir", Const, 16}, + {"ModeExclusive", Const, 16}, + {"ModeIrregular", Const, 16}, + {"ModeNamedPipe", Const, 16}, + {"ModePerm", Const, 16}, + {"ModeSetgid", Const, 16}, + {"ModeSetuid", Const, 16}, + {"ModeSocket", Const, 16}, + {"ModeSticky", Const, 16}, + {"ModeSymlink", Const, 16}, + {"ModeTemporary", Const, 16}, + {"ModeType", Const, 16}, + {"PathError", Type, 16}, + {"PathError.Err", Field, 16}, + {"PathError.Op", Field, 16}, + {"PathError.Path", Field, 16}, + {"ReadDir", Func, 16}, + {"ReadDirFS", Type, 16}, + {"ReadDirFile", Type, 16}, + {"ReadFile", Func, 16}, + {"ReadFileFS", Type, 16}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 16}, + {"Stat", Func, 16}, + {"StatFS", Type, 16}, + {"Sub", Func, 16}, + {"SubFS", Type, 16}, + {"ValidPath", Func, 16}, + {"WalkDir", Func, 16}, + {"WalkDirFunc", Type, 16}, + }, + "io/ioutil": { + {"Discard", Var, 0}, + {"NopCloser", Func, 0}, + {"ReadAll", Func, 0}, + {"ReadDir", Func, 0}, + {"ReadFile", Func, 0}, + {"TempDir", Func, 0}, + {"TempFile", Func, 0}, + {"WriteFile", Func, 0}, + }, + "log": { + {"(*Logger).Fatal", Method, 0}, + {"(*Logger).Fatalf", Method, 0}, + {"(*Logger).Fatalln", Method, 0}, + {"(*Logger).Flags", Method, 0}, + {"(*Logger).Output", Method, 0}, + {"(*Logger).Panic", Method, 0}, + {"(*Logger).Panicf", Method, 0}, + {"(*Logger).Panicln", Method, 0}, + {"(*Logger).Prefix", Method, 0}, + {"(*Logger).Print", Method, 0}, + 
{"(*Logger).Printf", Method, 0}, + {"(*Logger).Println", Method, 0}, + {"(*Logger).SetFlags", Method, 0}, + {"(*Logger).SetOutput", Method, 5}, + {"(*Logger).SetPrefix", Method, 0}, + {"(*Logger).Writer", Method, 12}, + {"Default", Func, 16}, + {"Fatal", Func, 0}, + {"Fatalf", Func, 0}, + {"Fatalln", Func, 0}, + {"Flags", Func, 0}, + {"LUTC", Const, 5}, + {"Ldate", Const, 0}, + {"Llongfile", Const, 0}, + {"Lmicroseconds", Const, 0}, + {"Lmsgprefix", Const, 14}, + {"Logger", Type, 0}, + {"Lshortfile", Const, 0}, + {"LstdFlags", Const, 0}, + {"Ltime", Const, 0}, + {"New", Func, 0}, + {"Output", Func, 5}, + {"Panic", Func, 0}, + {"Panicf", Func, 0}, + {"Panicln", Func, 0}, + {"Prefix", Func, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"SetFlags", Func, 0}, + {"SetOutput", Func, 0}, + {"SetPrefix", Func, 0}, + {"Writer", Func, 13}, + }, + "log/slog": { + {"(*JSONHandler).Enabled", Method, 21}, + {"(*JSONHandler).Handle", Method, 21}, + {"(*JSONHandler).WithAttrs", Method, 21}, + {"(*JSONHandler).WithGroup", Method, 21}, + {"(*Level).UnmarshalJSON", Method, 21}, + {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).Level", Method, 21}, + {"(*LevelVar).MarshalText", Method, 21}, + {"(*LevelVar).Set", Method, 21}, + {"(*LevelVar).String", Method, 21}, + {"(*LevelVar).UnmarshalText", Method, 21}, + {"(*Logger).Debug", Method, 21}, + {"(*Logger).DebugContext", Method, 21}, + {"(*Logger).Enabled", Method, 21}, + {"(*Logger).Error", Method, 21}, + {"(*Logger).ErrorContext", Method, 21}, + {"(*Logger).Handler", Method, 21}, + {"(*Logger).Info", Method, 21}, + {"(*Logger).InfoContext", Method, 21}, + {"(*Logger).Log", Method, 21}, + {"(*Logger).LogAttrs", Method, 21}, + {"(*Logger).Warn", Method, 21}, + {"(*Logger).WarnContext", Method, 21}, + {"(*Logger).With", Method, 21}, + {"(*Logger).WithGroup", Method, 21}, + {"(*Record).Add", Method, 21}, + {"(*Record).AddAttrs", Method, 21}, + {"(*TextHandler).Enabled", Method, 21}, + {"(*TextHandler).Handle", Method, 21}, + {"(*TextHandler).WithAttrs", Method, 21}, + {"(*TextHandler).WithGroup", Method, 21}, + {"(Attr).Equal", Method, 21}, + {"(Attr).String", Method, 21}, + {"(Kind).String", Method, 21}, + {"(Level).Level", Method, 21}, + {"(Level).MarshalJSON", Method, 21}, + {"(Level).MarshalText", Method, 21}, + {"(Level).String", Method, 21}, + {"(Record).Attrs", Method, 21}, + {"(Record).Clone", Method, 21}, + {"(Record).NumAttrs", Method, 21}, + {"(Value).Any", Method, 21}, + {"(Value).Bool", Method, 21}, + {"(Value).Duration", Method, 21}, + {"(Value).Equal", Method, 21}, + {"(Value).Float64", Method, 21}, + {"(Value).Group", Method, 21}, + {"(Value).Int64", Method, 21}, + {"(Value).Kind", Method, 21}, + {"(Value).LogValuer", Method, 21}, + {"(Value).Resolve", Method, 21}, + {"(Value).String", Method, 21}, + {"(Value).Time", Method, 21}, + {"(Value).Uint64", Method, 21}, + {"Any", Func, 21}, + {"AnyValue", Func, 21}, + {"Attr", Type, 21}, + {"Attr.Key", Field, 21}, + {"Attr.Value", Field, 21}, + {"Bool", Func, 21}, + {"BoolValue", Func, 21}, + {"Debug", Func, 21}, + {"DebugContext", Func, 21}, + {"Default", Func, 21}, + {"Duration", Func, 21}, + {"DurationValue", Func, 21}, + {"Error", Func, 21}, + {"ErrorContext", Func, 21}, + {"Float64", Func, 21}, + {"Float64Value", Func, 21}, + {"Group", Func, 21}, + {"GroupValue", Func, 21}, + {"Handler", Type, 21}, + {"HandlerOptions", Type, 21}, + {"HandlerOptions.AddSource", Field, 21}, + {"HandlerOptions.Level", Field, 21}, + {"HandlerOptions.ReplaceAttr", Field, 21}, + 
{"Info", Func, 21}, + {"InfoContext", Func, 21}, + {"Int", Func, 21}, + {"Int64", Func, 21}, + {"Int64Value", Func, 21}, + {"IntValue", Func, 21}, + {"JSONHandler", Type, 21}, + {"Kind", Type, 21}, + {"KindAny", Const, 21}, + {"KindBool", Const, 21}, + {"KindDuration", Const, 21}, + {"KindFloat64", Const, 21}, + {"KindGroup", Const, 21}, + {"KindInt64", Const, 21}, + {"KindLogValuer", Const, 21}, + {"KindString", Const, 21}, + {"KindTime", Const, 21}, + {"KindUint64", Const, 21}, + {"Level", Type, 21}, + {"LevelDebug", Const, 21}, + {"LevelError", Const, 21}, + {"LevelInfo", Const, 21}, + {"LevelKey", Const, 21}, + {"LevelVar", Type, 21}, + {"LevelWarn", Const, 21}, + {"Leveler", Type, 21}, + {"Log", Func, 21}, + {"LogAttrs", Func, 21}, + {"LogValuer", Type, 21}, + {"Logger", Type, 21}, + {"MessageKey", Const, 21}, + {"New", Func, 21}, + {"NewJSONHandler", Func, 21}, + {"NewLogLogger", Func, 21}, + {"NewRecord", Func, 21}, + {"NewTextHandler", Func, 21}, + {"Record", Type, 21}, + {"Record.Level", Field, 21}, + {"Record.Message", Field, 21}, + {"Record.PC", Field, 21}, + {"Record.Time", Field, 21}, + {"SetDefault", Func, 21}, + {"SetLogLoggerLevel", Func, 22}, + {"Source", Type, 21}, + {"Source.File", Field, 21}, + {"Source.Function", Field, 21}, + {"Source.Line", Field, 21}, + {"SourceKey", Const, 21}, + {"String", Func, 21}, + {"StringValue", Func, 21}, + {"TextHandler", Type, 21}, + {"Time", Func, 21}, + {"TimeKey", Const, 21}, + {"TimeValue", Func, 21}, + {"Uint64", Func, 21}, + {"Uint64Value", Func, 21}, + {"Value", Type, 21}, + {"Warn", Func, 21}, + {"WarnContext", Func, 21}, + {"With", Func, 21}, + }, + "log/syslog": { + {"(*Writer).Alert", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Crit", Method, 0}, + {"(*Writer).Debug", Method, 0}, + {"(*Writer).Emerg", Method, 0}, + {"(*Writer).Err", Method, 0}, + {"(*Writer).Info", Method, 0}, + {"(*Writer).Notice", Method, 0}, + {"(*Writer).Warning", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"Dial", Func, 0}, + {"LOG_ALERT", Const, 0}, + {"LOG_AUTH", Const, 1}, + {"LOG_AUTHPRIV", Const, 1}, + {"LOG_CRIT", Const, 0}, + {"LOG_CRON", Const, 1}, + {"LOG_DAEMON", Const, 1}, + {"LOG_DEBUG", Const, 0}, + {"LOG_EMERG", Const, 0}, + {"LOG_ERR", Const, 0}, + {"LOG_FTP", Const, 1}, + {"LOG_INFO", Const, 0}, + {"LOG_KERN", Const, 1}, + {"LOG_LOCAL0", Const, 1}, + {"LOG_LOCAL1", Const, 1}, + {"LOG_LOCAL2", Const, 1}, + {"LOG_LOCAL3", Const, 1}, + {"LOG_LOCAL4", Const, 1}, + {"LOG_LOCAL5", Const, 1}, + {"LOG_LOCAL6", Const, 1}, + {"LOG_LOCAL7", Const, 1}, + {"LOG_LPR", Const, 1}, + {"LOG_MAIL", Const, 1}, + {"LOG_NEWS", Const, 1}, + {"LOG_NOTICE", Const, 0}, + {"LOG_SYSLOG", Const, 1}, + {"LOG_USER", Const, 1}, + {"LOG_UUCP", Const, 1}, + {"LOG_WARNING", Const, 0}, + {"New", Func, 0}, + {"NewLogger", Func, 0}, + {"Priority", Type, 0}, + {"Writer", Type, 0}, + }, + "maps": { + {"Clone", Func, 21}, + {"Copy", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + }, + "math": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atan2", Func, 0}, + {"Atanh", Func, 0}, + {"Cbrt", Func, 0}, + {"Ceil", Func, 0}, + {"Copysign", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Dim", Func, 0}, + {"E", Const, 0}, + {"Erf", Func, 0}, + {"Erfc", Func, 0}, + {"Erfcinv", Func, 10}, + {"Erfinv", Func, 10}, + {"Exp", Func, 0}, + {"Exp2", Func, 0}, + {"Expm1", Func, 0}, + {"FMA", Func, 14}, + {"Float32bits", Func, 0}, + 
{"Float32frombits", Func, 0}, + {"Float64bits", Func, 0}, + {"Float64frombits", Func, 0}, + {"Floor", Func, 0}, + {"Frexp", Func, 0}, + {"Gamma", Func, 0}, + {"Hypot", Func, 0}, + {"Ilogb", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"J0", Func, 0}, + {"J1", Func, 0}, + {"Jn", Func, 0}, + {"Ldexp", Func, 0}, + {"Lgamma", Func, 0}, + {"Ln10", Const, 0}, + {"Ln2", Const, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"Log10E", Const, 0}, + {"Log1p", Func, 0}, + {"Log2", Func, 0}, + {"Log2E", Const, 0}, + {"Logb", Func, 0}, + {"Max", Func, 0}, + {"MaxFloat32", Const, 0}, + {"MaxFloat64", Const, 0}, + {"MaxInt", Const, 17}, + {"MaxInt16", Const, 0}, + {"MaxInt32", Const, 0}, + {"MaxInt64", Const, 0}, + {"MaxInt8", Const, 0}, + {"MaxUint", Const, 17}, + {"MaxUint16", Const, 0}, + {"MaxUint32", Const, 0}, + {"MaxUint64", Const, 0}, + {"MaxUint8", Const, 0}, + {"Min", Func, 0}, + {"MinInt", Const, 17}, + {"MinInt16", Const, 0}, + {"MinInt32", Const, 0}, + {"MinInt64", Const, 0}, + {"MinInt8", Const, 0}, + {"Mod", Func, 0}, + {"Modf", Func, 0}, + {"NaN", Func, 0}, + {"Nextafter", Func, 0}, + {"Nextafter32", Func, 4}, + {"Phi", Const, 0}, + {"Pi", Const, 0}, + {"Pow", Func, 0}, + {"Pow10", Func, 0}, + {"Remainder", Func, 0}, + {"Round", Func, 10}, + {"RoundToEven", Func, 10}, + {"Signbit", Func, 0}, + {"Sin", Func, 0}, + {"Sincos", Func, 0}, + {"Sinh", Func, 0}, + {"SmallestNonzeroFloat32", Const, 0}, + {"SmallestNonzeroFloat64", Const, 0}, + {"Sqrt", Func, 0}, + {"Sqrt2", Const, 0}, + {"SqrtE", Const, 0}, + {"SqrtPhi", Const, 0}, + {"SqrtPi", Const, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + {"Trunc", Func, 0}, + {"Y0", Func, 0}, + {"Y1", Func, 0}, + {"Yn", Func, 0}, + }, + "math/big": { + {"(*Float).Abs", Method, 5}, + {"(*Float).Acc", Method, 5}, + {"(*Float).Add", Method, 5}, + {"(*Float).Append", Method, 5}, + {"(*Float).Cmp", Method, 5}, + {"(*Float).Copy", Method, 5}, + {"(*Float).Float32", Method, 5}, + {"(*Float).Float64", Method, 5}, + {"(*Float).Format", Method, 5}, + {"(*Float).GobDecode", Method, 7}, + {"(*Float).GobEncode", Method, 7}, + {"(*Float).Int", Method, 5}, + {"(*Float).Int64", Method, 5}, + {"(*Float).IsInf", Method, 5}, + {"(*Float).IsInt", Method, 5}, + {"(*Float).MantExp", Method, 5}, + {"(*Float).MarshalText", Method, 6}, + {"(*Float).MinPrec", Method, 5}, + {"(*Float).Mode", Method, 5}, + {"(*Float).Mul", Method, 5}, + {"(*Float).Neg", Method, 5}, + {"(*Float).Parse", Method, 5}, + {"(*Float).Prec", Method, 5}, + {"(*Float).Quo", Method, 5}, + {"(*Float).Rat", Method, 5}, + {"(*Float).Scan", Method, 8}, + {"(*Float).Set", Method, 5}, + {"(*Float).SetFloat64", Method, 5}, + {"(*Float).SetInf", Method, 5}, + {"(*Float).SetInt", Method, 5}, + {"(*Float).SetInt64", Method, 5}, + {"(*Float).SetMantExp", Method, 5}, + {"(*Float).SetMode", Method, 5}, + {"(*Float).SetPrec", Method, 5}, + {"(*Float).SetRat", Method, 5}, + {"(*Float).SetString", Method, 5}, + {"(*Float).SetUint64", Method, 5}, + {"(*Float).Sign", Method, 5}, + {"(*Float).Signbit", Method, 5}, + {"(*Float).Sqrt", Method, 10}, + {"(*Float).String", Method, 5}, + {"(*Float).Sub", Method, 5}, + {"(*Float).Text", Method, 5}, + {"(*Float).Uint64", Method, 5}, + {"(*Float).UnmarshalText", Method, 6}, + {"(*Int).Abs", Method, 0}, + {"(*Int).Add", Method, 0}, + {"(*Int).And", Method, 0}, + {"(*Int).AndNot", Method, 0}, + {"(*Int).Append", Method, 6}, + {"(*Int).Binomial", Method, 0}, + {"(*Int).Bit", Method, 0}, + {"(*Int).BitLen", Method, 0}, + {"(*Int).Bits", Method, 0}, + 
{"(*Int).Bytes", Method, 0}, + {"(*Int).Cmp", Method, 0}, + {"(*Int).CmpAbs", Method, 10}, + {"(*Int).Div", Method, 0}, + {"(*Int).DivMod", Method, 0}, + {"(*Int).Exp", Method, 0}, + {"(*Int).FillBytes", Method, 15}, + {"(*Int).Float64", Method, 21}, + {"(*Int).Format", Method, 0}, + {"(*Int).GCD", Method, 0}, + {"(*Int).GobDecode", Method, 0}, + {"(*Int).GobEncode", Method, 0}, + {"(*Int).Int64", Method, 0}, + {"(*Int).IsInt64", Method, 9}, + {"(*Int).IsUint64", Method, 9}, + {"(*Int).Lsh", Method, 0}, + {"(*Int).MarshalJSON", Method, 1}, + {"(*Int).MarshalText", Method, 3}, + {"(*Int).Mod", Method, 0}, + {"(*Int).ModInverse", Method, 0}, + {"(*Int).ModSqrt", Method, 5}, + {"(*Int).Mul", Method, 0}, + {"(*Int).MulRange", Method, 0}, + {"(*Int).Neg", Method, 0}, + {"(*Int).Not", Method, 0}, + {"(*Int).Or", Method, 0}, + {"(*Int).ProbablyPrime", Method, 0}, + {"(*Int).Quo", Method, 0}, + {"(*Int).QuoRem", Method, 0}, + {"(*Int).Rand", Method, 0}, + {"(*Int).Rem", Method, 0}, + {"(*Int).Rsh", Method, 0}, + {"(*Int).Scan", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).SetBit", Method, 0}, + {"(*Int).SetBits", Method, 0}, + {"(*Int).SetBytes", Method, 0}, + {"(*Int).SetInt64", Method, 0}, + {"(*Int).SetString", Method, 0}, + {"(*Int).SetUint64", Method, 1}, + {"(*Int).Sign", Method, 0}, + {"(*Int).Sqrt", Method, 8}, + {"(*Int).String", Method, 0}, + {"(*Int).Sub", Method, 0}, + {"(*Int).Text", Method, 6}, + {"(*Int).TrailingZeroBits", Method, 13}, + {"(*Int).Uint64", Method, 1}, + {"(*Int).UnmarshalJSON", Method, 1}, + {"(*Int).UnmarshalText", Method, 3}, + {"(*Int).Xor", Method, 0}, + {"(*Rat).Abs", Method, 0}, + {"(*Rat).Add", Method, 0}, + {"(*Rat).Cmp", Method, 0}, + {"(*Rat).Denom", Method, 0}, + {"(*Rat).Float32", Method, 4}, + {"(*Rat).Float64", Method, 1}, + {"(*Rat).FloatPrec", Method, 22}, + {"(*Rat).FloatString", Method, 0}, + {"(*Rat).GobDecode", Method, 0}, + {"(*Rat).GobEncode", Method, 0}, + {"(*Rat).Inv", Method, 0}, + {"(*Rat).IsInt", Method, 0}, + {"(*Rat).MarshalText", Method, 3}, + {"(*Rat).Mul", Method, 0}, + {"(*Rat).Neg", Method, 0}, + {"(*Rat).Num", Method, 0}, + {"(*Rat).Quo", Method, 0}, + {"(*Rat).RatString", Method, 0}, + {"(*Rat).Scan", Method, 0}, + {"(*Rat).Set", Method, 0}, + {"(*Rat).SetFloat64", Method, 1}, + {"(*Rat).SetFrac", Method, 0}, + {"(*Rat).SetFrac64", Method, 0}, + {"(*Rat).SetInt", Method, 0}, + {"(*Rat).SetInt64", Method, 0}, + {"(*Rat).SetString", Method, 0}, + {"(*Rat).SetUint64", Method, 13}, + {"(*Rat).Sign", Method, 0}, + {"(*Rat).String", Method, 0}, + {"(*Rat).Sub", Method, 0}, + {"(*Rat).UnmarshalText", Method, 3}, + {"(Accuracy).String", Method, 5}, + {"(ErrNaN).Error", Method, 5}, + {"(RoundingMode).String", Method, 5}, + {"Above", Const, 5}, + {"Accuracy", Type, 5}, + {"AwayFromZero", Const, 5}, + {"Below", Const, 5}, + {"ErrNaN", Type, 5}, + {"Exact", Const, 5}, + {"Float", Type, 5}, + {"Int", Type, 0}, + {"Jacobi", Func, 5}, + {"MaxBase", Const, 0}, + {"MaxExp", Const, 5}, + {"MaxPrec", Const, 5}, + {"MinExp", Const, 5}, + {"NewFloat", Func, 5}, + {"NewInt", Func, 0}, + {"NewRat", Func, 0}, + {"ParseFloat", Func, 5}, + {"Rat", Type, 0}, + {"RoundingMode", Type, 5}, + {"ToNearestAway", Const, 5}, + {"ToNearestEven", Const, 5}, + {"ToNegativeInf", Const, 5}, + {"ToPositiveInf", Const, 5}, + {"ToZero", Const, 5}, + {"Word", Type, 0}, + }, + "math/bits": { + {"Add", Func, 12}, + {"Add32", Func, 12}, + {"Add64", Func, 12}, + {"Div", Func, 12}, + {"Div32", Func, 12}, + {"Div64", Func, 12}, + {"LeadingZeros", Func, 9}, + 
{"LeadingZeros16", Func, 9}, + {"LeadingZeros32", Func, 9}, + {"LeadingZeros64", Func, 9}, + {"LeadingZeros8", Func, 9}, + {"Len", Func, 9}, + {"Len16", Func, 9}, + {"Len32", Func, 9}, + {"Len64", Func, 9}, + {"Len8", Func, 9}, + {"Mul", Func, 12}, + {"Mul32", Func, 12}, + {"Mul64", Func, 12}, + {"OnesCount", Func, 9}, + {"OnesCount16", Func, 9}, + {"OnesCount32", Func, 9}, + {"OnesCount64", Func, 9}, + {"OnesCount8", Func, 9}, + {"Rem", Func, 14}, + {"Rem32", Func, 14}, + {"Rem64", Func, 14}, + {"Reverse", Func, 9}, + {"Reverse16", Func, 9}, + {"Reverse32", Func, 9}, + {"Reverse64", Func, 9}, + {"Reverse8", Func, 9}, + {"ReverseBytes", Func, 9}, + {"ReverseBytes16", Func, 9}, + {"ReverseBytes32", Func, 9}, + {"ReverseBytes64", Func, 9}, + {"RotateLeft", Func, 9}, + {"RotateLeft16", Func, 9}, + {"RotateLeft32", Func, 9}, + {"RotateLeft64", Func, 9}, + {"RotateLeft8", Func, 9}, + {"Sub", Func, 12}, + {"Sub32", Func, 12}, + {"Sub64", Func, 12}, + {"TrailingZeros", Func, 9}, + {"TrailingZeros16", Func, 9}, + {"TrailingZeros32", Func, 9}, + {"TrailingZeros64", Func, 9}, + {"TrailingZeros8", Func, 9}, + {"UintSize", Const, 9}, + }, + "math/cmplx": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atanh", Func, 0}, + {"Conj", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Cot", Func, 0}, + {"Exp", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"NaN", Func, 0}, + {"Phase", Func, 0}, + {"Polar", Func, 0}, + {"Pow", Func, 0}, + {"Rect", Func, 0}, + {"Sin", Func, 0}, + {"Sinh", Func, 0}, + {"Sqrt", Func, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + }, + "math/rand": { + {"(*Rand).ExpFloat64", Method, 0}, + {"(*Rand).Float32", Method, 0}, + {"(*Rand).Float64", Method, 0}, + {"(*Rand).Int", Method, 0}, + {"(*Rand).Int31", Method, 0}, + {"(*Rand).Int31n", Method, 0}, + {"(*Rand).Int63", Method, 0}, + {"(*Rand).Int63n", Method, 0}, + {"(*Rand).Intn", Method, 0}, + {"(*Rand).NormFloat64", Method, 0}, + {"(*Rand).Perm", Method, 0}, + {"(*Rand).Read", Method, 6}, + {"(*Rand).Seed", Method, 0}, + {"(*Rand).Shuffle", Method, 10}, + {"(*Rand).Uint32", Method, 0}, + {"(*Rand).Uint64", Method, 8}, + {"(*Zipf).Uint64", Method, 0}, + {"ExpFloat64", Func, 0}, + {"Float32", Func, 0}, + {"Float64", Func, 0}, + {"Int", Func, 0}, + {"Int31", Func, 0}, + {"Int31n", Func, 0}, + {"Int63", Func, 0}, + {"Int63n", Func, 0}, + {"Intn", Func, 0}, + {"New", Func, 0}, + {"NewSource", Func, 0}, + {"NewZipf", Func, 0}, + {"NormFloat64", Func, 0}, + {"Perm", Func, 0}, + {"Rand", Type, 0}, + {"Read", Func, 6}, + {"Seed", Func, 0}, + {"Shuffle", Func, 10}, + {"Source", Type, 0}, + {"Source64", Type, 8}, + {"Uint32", Func, 0}, + {"Uint64", Func, 8}, + {"Zipf", Type, 0}, + }, + "math/rand/v2": { + {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Seed", Method, 22}, + {"(*ChaCha8).Uint64", Method, 22}, + {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).MarshalBinary", Method, 22}, + {"(*PCG).Seed", Method, 22}, + {"(*PCG).Uint64", Method, 22}, + {"(*PCG).UnmarshalBinary", Method, 22}, + {"(*Rand).ExpFloat64", Method, 22}, + {"(*Rand).Float32", Method, 22}, + {"(*Rand).Float64", Method, 22}, + {"(*Rand).Int", Method, 22}, + {"(*Rand).Int32", Method, 22}, + {"(*Rand).Int32N", Method, 22}, + {"(*Rand).Int64", Method, 22}, + {"(*Rand).Int64N", Method, 22}, + {"(*Rand).IntN", Method, 22}, + {"(*Rand).NormFloat64", Method, 22}, + {"(*Rand).Perm", Method, 22}, + 
{"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint32", Method, 22}, + {"(*Rand).Uint32N", Method, 22}, + {"(*Rand).Uint64", Method, 22}, + {"(*Rand).Uint64N", Method, 22}, + {"(*Rand).UintN", Method, 22}, + {"(*Zipf).Uint64", Method, 22}, + {"ChaCha8", Type, 22}, + {"ExpFloat64", Func, 22}, + {"Float32", Func, 22}, + {"Float64", Func, 22}, + {"Int", Func, 22}, + {"Int32", Func, 22}, + {"Int32N", Func, 22}, + {"Int64", Func, 22}, + {"Int64N", Func, 22}, + {"IntN", Func, 22}, + {"N", Func, 22}, + {"New", Func, 22}, + {"NewChaCha8", Func, 22}, + {"NewPCG", Func, 22}, + {"NewZipf", Func, 22}, + {"NormFloat64", Func, 22}, + {"PCG", Type, 22}, + {"Perm", Func, 22}, + {"Rand", Type, 22}, + {"Shuffle", Func, 22}, + {"Source", Type, 22}, + {"Uint32", Func, 22}, + {"Uint32N", Func, 22}, + {"Uint64", Func, 22}, + {"Uint64N", Func, 22}, + {"UintN", Func, 22}, + {"Zipf", Type, 22}, + }, + "mime": { + {"(*WordDecoder).Decode", Method, 5}, + {"(*WordDecoder).DecodeHeader", Method, 5}, + {"(WordEncoder).Encode", Method, 5}, + {"AddExtensionType", Func, 0}, + {"BEncoding", Const, 5}, + {"ErrInvalidMediaParameter", Var, 9}, + {"ExtensionsByType", Func, 5}, + {"FormatMediaType", Func, 0}, + {"ParseMediaType", Func, 0}, + {"QEncoding", Const, 5}, + {"TypeByExtension", Func, 0}, + {"WordDecoder", Type, 5}, + {"WordDecoder.CharsetReader", Field, 5}, + {"WordEncoder", Type, 5}, + }, + "mime/multipart": { + {"(*FileHeader).Open", Method, 0}, + {"(*Form).RemoveAll", Method, 0}, + {"(*Part).Close", Method, 0}, + {"(*Part).FileName", Method, 0}, + {"(*Part).FormName", Method, 0}, + {"(*Part).Read", Method, 0}, + {"(*Reader).NextPart", Method, 0}, + {"(*Reader).NextRawPart", Method, 14}, + {"(*Reader).ReadForm", Method, 0}, + {"(*Writer).Boundary", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).CreateFormField", Method, 0}, + {"(*Writer).CreateFormFile", Method, 0}, + {"(*Writer).CreatePart", Method, 0}, + {"(*Writer).FormDataContentType", Method, 0}, + {"(*Writer).SetBoundary", Method, 1}, + {"(*Writer).WriteField", Method, 0}, + {"ErrMessageTooLarge", Var, 9}, + {"File", Type, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Filename", Field, 0}, + {"FileHeader.Header", Field, 0}, + {"FileHeader.Size", Field, 9}, + {"Form", Type, 0}, + {"Form.File", Field, 0}, + {"Form.Value", Field, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Part", Type, 0}, + {"Part.Header", Field, 0}, + {"Reader", Type, 0}, + {"Writer", Type, 0}, + }, + "mime/quotedprintable": { + {"(*Reader).Read", Method, 5}, + {"(*Writer).Close", Method, 5}, + {"(*Writer).Write", Method, 5}, + {"NewReader", Func, 5}, + {"NewWriter", Func, 5}, + {"Reader", Type, 5}, + {"Writer", Type, 5}, + {"Writer.Binary", Field, 5}, + }, + "net": { + {"(*AddrError).Error", Method, 0}, + {"(*AddrError).Temporary", Method, 0}, + {"(*AddrError).Timeout", Method, 0}, + {"(*Buffers).Read", Method, 8}, + {"(*Buffers).WriteTo", Method, 8}, + {"(*DNSConfigError).Error", Method, 0}, + {"(*DNSConfigError).Temporary", Method, 0}, + {"(*DNSConfigError).Timeout", Method, 0}, + {"(*DNSConfigError).Unwrap", Method, 13}, + {"(*DNSError).Error", Method, 0}, + {"(*DNSError).Temporary", Method, 0}, + {"(*DNSError).Timeout", Method, 0}, + {"(*Dialer).Dial", Method, 1}, + {"(*Dialer).DialContext", Method, 7}, + {"(*Dialer).MultipathTCP", Method, 21}, + {"(*Dialer).SetMultipathTCP", Method, 21}, + {"(*IP).UnmarshalText", Method, 2}, + {"(*IPAddr).Network", Method, 0}, + {"(*IPAddr).String", Method, 0}, + {"(*IPConn).Close", Method, 0}, + {"(*IPConn).File", Method, 0}, + 
{"(*IPConn).LocalAddr", Method, 0}, + {"(*IPConn).Read", Method, 0}, + {"(*IPConn).ReadFrom", Method, 0}, + {"(*IPConn).ReadFromIP", Method, 0}, + {"(*IPConn).ReadMsgIP", Method, 1}, + {"(*IPConn).RemoteAddr", Method, 0}, + {"(*IPConn).SetDeadline", Method, 0}, + {"(*IPConn).SetReadBuffer", Method, 0}, + {"(*IPConn).SetReadDeadline", Method, 0}, + {"(*IPConn).SetWriteBuffer", Method, 0}, + {"(*IPConn).SetWriteDeadline", Method, 0}, + {"(*IPConn).SyscallConn", Method, 9}, + {"(*IPConn).Write", Method, 0}, + {"(*IPConn).WriteMsgIP", Method, 1}, + {"(*IPConn).WriteTo", Method, 0}, + {"(*IPConn).WriteToIP", Method, 0}, + {"(*IPNet).Contains", Method, 0}, + {"(*IPNet).Network", Method, 0}, + {"(*IPNet).String", Method, 0}, + {"(*Interface).Addrs", Method, 0}, + {"(*Interface).MulticastAddrs", Method, 0}, + {"(*ListenConfig).Listen", Method, 11}, + {"(*ListenConfig).ListenPacket", Method, 11}, + {"(*ListenConfig).MultipathTCP", Method, 21}, + {"(*ListenConfig).SetMultipathTCP", Method, 21}, + {"(*OpError).Error", Method, 0}, + {"(*OpError).Temporary", Method, 0}, + {"(*OpError).Timeout", Method, 0}, + {"(*OpError).Unwrap", Method, 13}, + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Temporary", Method, 17}, + {"(*ParseError).Timeout", Method, 17}, + {"(*Resolver).LookupAddr", Method, 8}, + {"(*Resolver).LookupCNAME", Method, 8}, + {"(*Resolver).LookupHost", Method, 8}, + {"(*Resolver).LookupIP", Method, 15}, + {"(*Resolver).LookupIPAddr", Method, 8}, + {"(*Resolver).LookupMX", Method, 8}, + {"(*Resolver).LookupNS", Method, 8}, + {"(*Resolver).LookupNetIP", Method, 18}, + {"(*Resolver).LookupPort", Method, 8}, + {"(*Resolver).LookupSRV", Method, 8}, + {"(*Resolver).LookupTXT", Method, 8}, + {"(*TCPAddr).AddrPort", Method, 18}, + {"(*TCPAddr).Network", Method, 0}, + {"(*TCPAddr).String", Method, 0}, + {"(*TCPConn).Close", Method, 0}, + {"(*TCPConn).CloseRead", Method, 0}, + {"(*TCPConn).CloseWrite", Method, 0}, + {"(*TCPConn).File", Method, 0}, + {"(*TCPConn).LocalAddr", Method, 0}, + {"(*TCPConn).MultipathTCP", Method, 21}, + {"(*TCPConn).Read", Method, 0}, + {"(*TCPConn).ReadFrom", Method, 0}, + {"(*TCPConn).RemoteAddr", Method, 0}, + {"(*TCPConn).SetDeadline", Method, 0}, + {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, + {"(*TCPConn).SetLinger", Method, 0}, + {"(*TCPConn).SetNoDelay", Method, 0}, + {"(*TCPConn).SetReadBuffer", Method, 0}, + {"(*TCPConn).SetReadDeadline", Method, 0}, + {"(*TCPConn).SetWriteBuffer", Method, 0}, + {"(*TCPConn).SetWriteDeadline", Method, 0}, + {"(*TCPConn).SyscallConn", Method, 9}, + {"(*TCPConn).Write", Method, 0}, + {"(*TCPConn).WriteTo", Method, 22}, + {"(*TCPListener).Accept", Method, 0}, + {"(*TCPListener).AcceptTCP", Method, 0}, + {"(*TCPListener).Addr", Method, 0}, + {"(*TCPListener).Close", Method, 0}, + {"(*TCPListener).File", Method, 0}, + {"(*TCPListener).SetDeadline", Method, 0}, + {"(*TCPListener).SyscallConn", Method, 10}, + {"(*UDPAddr).AddrPort", Method, 18}, + {"(*UDPAddr).Network", Method, 0}, + {"(*UDPAddr).String", Method, 0}, + {"(*UDPConn).Close", Method, 0}, + {"(*UDPConn).File", Method, 0}, + {"(*UDPConn).LocalAddr", Method, 0}, + {"(*UDPConn).Read", Method, 0}, + {"(*UDPConn).ReadFrom", Method, 0}, + {"(*UDPConn).ReadFromUDP", Method, 0}, + {"(*UDPConn).ReadFromUDPAddrPort", Method, 18}, + {"(*UDPConn).ReadMsgUDP", Method, 1}, + {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).RemoteAddr", Method, 0}, + {"(*UDPConn).SetDeadline", Method, 0}, + {"(*UDPConn).SetReadBuffer", 
Method, 0}, + {"(*UDPConn).SetReadDeadline", Method, 0}, + {"(*UDPConn).SetWriteBuffer", Method, 0}, + {"(*UDPConn).SetWriteDeadline", Method, 0}, + {"(*UDPConn).SyscallConn", Method, 9}, + {"(*UDPConn).Write", Method, 0}, + {"(*UDPConn).WriteMsgUDP", Method, 1}, + {"(*UDPConn).WriteMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).WriteTo", Method, 0}, + {"(*UDPConn).WriteToUDP", Method, 0}, + {"(*UDPConn).WriteToUDPAddrPort", Method, 18}, + {"(*UnixAddr).Network", Method, 0}, + {"(*UnixAddr).String", Method, 0}, + {"(*UnixConn).Close", Method, 0}, + {"(*UnixConn).CloseRead", Method, 1}, + {"(*UnixConn).CloseWrite", Method, 1}, + {"(*UnixConn).File", Method, 0}, + {"(*UnixConn).LocalAddr", Method, 0}, + {"(*UnixConn).Read", Method, 0}, + {"(*UnixConn).ReadFrom", Method, 0}, + {"(*UnixConn).ReadFromUnix", Method, 0}, + {"(*UnixConn).ReadMsgUnix", Method, 0}, + {"(*UnixConn).RemoteAddr", Method, 0}, + {"(*UnixConn).SetDeadline", Method, 0}, + {"(*UnixConn).SetReadBuffer", Method, 0}, + {"(*UnixConn).SetReadDeadline", Method, 0}, + {"(*UnixConn).SetWriteBuffer", Method, 0}, + {"(*UnixConn).SetWriteDeadline", Method, 0}, + {"(*UnixConn).SyscallConn", Method, 9}, + {"(*UnixConn).Write", Method, 0}, + {"(*UnixConn).WriteMsgUnix", Method, 0}, + {"(*UnixConn).WriteTo", Method, 0}, + {"(*UnixConn).WriteToUnix", Method, 0}, + {"(*UnixListener).Accept", Method, 0}, + {"(*UnixListener).AcceptUnix", Method, 0}, + {"(*UnixListener).Addr", Method, 0}, + {"(*UnixListener).Close", Method, 0}, + {"(*UnixListener).File", Method, 0}, + {"(*UnixListener).SetDeadline", Method, 0}, + {"(*UnixListener).SetUnlinkOnClose", Method, 8}, + {"(*UnixListener).SyscallConn", Method, 10}, + {"(Flags).String", Method, 0}, + {"(HardwareAddr).String", Method, 0}, + {"(IP).DefaultMask", Method, 0}, + {"(IP).Equal", Method, 0}, + {"(IP).IsGlobalUnicast", Method, 0}, + {"(IP).IsInterfaceLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalMulticast", Method, 0}, + {"(IP).IsLinkLocalUnicast", Method, 0}, + {"(IP).IsLoopback", Method, 0}, + {"(IP).IsMulticast", Method, 0}, + {"(IP).IsPrivate", Method, 17}, + {"(IP).IsUnspecified", Method, 0}, + {"(IP).MarshalText", Method, 2}, + {"(IP).Mask", Method, 0}, + {"(IP).String", Method, 0}, + {"(IP).To16", Method, 0}, + {"(IP).To4", Method, 0}, + {"(IPMask).Size", Method, 0}, + {"(IPMask).String", Method, 0}, + {"(InvalidAddrError).Error", Method, 0}, + {"(InvalidAddrError).Temporary", Method, 0}, + {"(InvalidAddrError).Timeout", Method, 0}, + {"(UnknownNetworkError).Error", Method, 0}, + {"(UnknownNetworkError).Temporary", Method, 0}, + {"(UnknownNetworkError).Timeout", Method, 0}, + {"Addr", Type, 0}, + {"AddrError", Type, 0}, + {"AddrError.Addr", Field, 0}, + {"AddrError.Err", Field, 0}, + {"Buffers", Type, 8}, + {"CIDRMask", Func, 0}, + {"Conn", Type, 0}, + {"DNSConfigError", Type, 0}, + {"DNSConfigError.Err", Field, 0}, + {"DNSError", Type, 0}, + {"DNSError.Err", Field, 0}, + {"DNSError.IsNotFound", Field, 13}, + {"DNSError.IsTemporary", Field, 6}, + {"DNSError.IsTimeout", Field, 0}, + {"DNSError.Name", Field, 0}, + {"DNSError.Server", Field, 0}, + {"DefaultResolver", Var, 8}, + {"Dial", Func, 0}, + {"DialIP", Func, 0}, + {"DialTCP", Func, 0}, + {"DialTimeout", Func, 0}, + {"DialUDP", Func, 0}, + {"DialUnix", Func, 0}, + {"Dialer", Type, 1}, + {"Dialer.Cancel", Field, 6}, + {"Dialer.Control", Field, 11}, + {"Dialer.ControlContext", Field, 20}, + {"Dialer.Deadline", Field, 1}, + {"Dialer.DualStack", Field, 2}, + {"Dialer.FallbackDelay", Field, 5}, + {"Dialer.KeepAlive", Field, 3}, + 
{"Dialer.LocalAddr", Field, 1}, + {"Dialer.Resolver", Field, 8}, + {"Dialer.Timeout", Field, 1}, + {"ErrClosed", Var, 16}, + {"ErrWriteToConnected", Var, 0}, + {"Error", Type, 0}, + {"FileConn", Func, 0}, + {"FileListener", Func, 0}, + {"FilePacketConn", Func, 0}, + {"FlagBroadcast", Const, 0}, + {"FlagLoopback", Const, 0}, + {"FlagMulticast", Const, 0}, + {"FlagPointToPoint", Const, 0}, + {"FlagRunning", Const, 20}, + {"FlagUp", Const, 0}, + {"Flags", Type, 0}, + {"HardwareAddr", Type, 0}, + {"IP", Type, 0}, + {"IPAddr", Type, 0}, + {"IPAddr.IP", Field, 0}, + {"IPAddr.Zone", Field, 1}, + {"IPConn", Type, 0}, + {"IPMask", Type, 0}, + {"IPNet", Type, 0}, + {"IPNet.IP", Field, 0}, + {"IPNet.Mask", Field, 0}, + {"IPv4", Func, 0}, + {"IPv4Mask", Func, 0}, + {"IPv4allrouter", Var, 0}, + {"IPv4allsys", Var, 0}, + {"IPv4bcast", Var, 0}, + {"IPv4len", Const, 0}, + {"IPv4zero", Var, 0}, + {"IPv6interfacelocalallnodes", Var, 0}, + {"IPv6len", Const, 0}, + {"IPv6linklocalallnodes", Var, 0}, + {"IPv6linklocalallrouters", Var, 0}, + {"IPv6loopback", Var, 0}, + {"IPv6unspecified", Var, 0}, + {"IPv6zero", Var, 0}, + {"Interface", Type, 0}, + {"Interface.Flags", Field, 0}, + {"Interface.HardwareAddr", Field, 0}, + {"Interface.Index", Field, 0}, + {"Interface.MTU", Field, 0}, + {"Interface.Name", Field, 0}, + {"InterfaceAddrs", Func, 0}, + {"InterfaceByIndex", Func, 0}, + {"InterfaceByName", Func, 0}, + {"Interfaces", Func, 0}, + {"InvalidAddrError", Type, 0}, + {"JoinHostPort", Func, 0}, + {"Listen", Func, 0}, + {"ListenConfig", Type, 11}, + {"ListenConfig.Control", Field, 11}, + {"ListenConfig.KeepAlive", Field, 13}, + {"ListenIP", Func, 0}, + {"ListenMulticastUDP", Func, 0}, + {"ListenPacket", Func, 0}, + {"ListenTCP", Func, 0}, + {"ListenUDP", Func, 0}, + {"ListenUnix", Func, 0}, + {"ListenUnixgram", Func, 0}, + {"Listener", Type, 0}, + {"LookupAddr", Func, 0}, + {"LookupCNAME", Func, 0}, + {"LookupHost", Func, 0}, + {"LookupIP", Func, 0}, + {"LookupMX", Func, 0}, + {"LookupNS", Func, 1}, + {"LookupPort", Func, 0}, + {"LookupSRV", Func, 0}, + {"LookupTXT", Func, 0}, + {"MX", Type, 0}, + {"MX.Host", Field, 0}, + {"MX.Pref", Field, 0}, + {"NS", Type, 1}, + {"NS.Host", Field, 1}, + {"OpError", Type, 0}, + {"OpError.Addr", Field, 0}, + {"OpError.Err", Field, 0}, + {"OpError.Net", Field, 0}, + {"OpError.Op", Field, 0}, + {"OpError.Source", Field, 5}, + {"PacketConn", Type, 0}, + {"ParseCIDR", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Text", Field, 0}, + {"ParseError.Type", Field, 0}, + {"ParseIP", Func, 0}, + {"ParseMAC", Func, 0}, + {"Pipe", Func, 0}, + {"ResolveIPAddr", Func, 0}, + {"ResolveTCPAddr", Func, 0}, + {"ResolveUDPAddr", Func, 0}, + {"ResolveUnixAddr", Func, 0}, + {"Resolver", Type, 8}, + {"Resolver.Dial", Field, 9}, + {"Resolver.PreferGo", Field, 8}, + {"Resolver.StrictErrors", Field, 9}, + {"SRV", Type, 0}, + {"SRV.Port", Field, 0}, + {"SRV.Priority", Field, 0}, + {"SRV.Target", Field, 0}, + {"SRV.Weight", Field, 0}, + {"SplitHostPort", Func, 0}, + {"TCPAddr", Type, 0}, + {"TCPAddr.IP", Field, 0}, + {"TCPAddr.Port", Field, 0}, + {"TCPAddr.Zone", Field, 1}, + {"TCPAddrFromAddrPort", Func, 18}, + {"TCPConn", Type, 0}, + {"TCPListener", Type, 0}, + {"UDPAddr", Type, 0}, + {"UDPAddr.IP", Field, 0}, + {"UDPAddr.Port", Field, 0}, + {"UDPAddr.Zone", Field, 1}, + {"UDPAddrFromAddrPort", Func, 18}, + {"UDPConn", Type, 0}, + {"UnixAddr", Type, 0}, + {"UnixAddr.Name", Field, 0}, + {"UnixAddr.Net", Field, 0}, + {"UnixConn", Type, 0}, + {"UnixListener", Type, 0}, + {"UnknownNetworkError", Type, 
0}, + }, + "net/http": { + {"(*Client).CloseIdleConnections", Method, 12}, + {"(*Client).Do", Method, 0}, + {"(*Client).Get", Method, 0}, + {"(*Client).Head", Method, 0}, + {"(*Client).Post", Method, 0}, + {"(*Client).PostForm", Method, 0}, + {"(*Cookie).String", Method, 0}, + {"(*Cookie).Valid", Method, 18}, + {"(*MaxBytesError).Error", Method, 19}, + {"(*ProtocolError).Error", Method, 0}, + {"(*ProtocolError).Is", Method, 21}, + {"(*Request).AddCookie", Method, 0}, + {"(*Request).BasicAuth", Method, 4}, + {"(*Request).Clone", Method, 13}, + {"(*Request).Context", Method, 7}, + {"(*Request).Cookie", Method, 0}, + {"(*Request).Cookies", Method, 0}, + {"(*Request).FormFile", Method, 0}, + {"(*Request).FormValue", Method, 0}, + {"(*Request).MultipartReader", Method, 0}, + {"(*Request).ParseForm", Method, 0}, + {"(*Request).ParseMultipartForm", Method, 0}, + {"(*Request).PathValue", Method, 22}, + {"(*Request).PostFormValue", Method, 1}, + {"(*Request).ProtoAtLeast", Method, 0}, + {"(*Request).Referer", Method, 0}, + {"(*Request).SetBasicAuth", Method, 0}, + {"(*Request).SetPathValue", Method, 22}, + {"(*Request).UserAgent", Method, 0}, + {"(*Request).WithContext", Method, 7}, + {"(*Request).Write", Method, 0}, + {"(*Request).WriteProxy", Method, 0}, + {"(*Response).Cookies", Method, 0}, + {"(*Response).Location", Method, 0}, + {"(*Response).ProtoAtLeast", Method, 0}, + {"(*Response).Write", Method, 0}, + {"(*ResponseController).EnableFullDuplex", Method, 21}, + {"(*ResponseController).Flush", Method, 20}, + {"(*ResponseController).Hijack", Method, 20}, + {"(*ResponseController).SetReadDeadline", Method, 20}, + {"(*ResponseController).SetWriteDeadline", Method, 20}, + {"(*ServeMux).Handle", Method, 0}, + {"(*ServeMux).HandleFunc", Method, 0}, + {"(*ServeMux).Handler", Method, 1}, + {"(*ServeMux).ServeHTTP", Method, 0}, + {"(*Server).Close", Method, 8}, + {"(*Server).ListenAndServe", Method, 0}, + {"(*Server).ListenAndServeTLS", Method, 0}, + {"(*Server).RegisterOnShutdown", Method, 9}, + {"(*Server).Serve", Method, 0}, + {"(*Server).ServeTLS", Method, 9}, + {"(*Server).SetKeepAlivesEnabled", Method, 3}, + {"(*Server).Shutdown", Method, 8}, + {"(*Transport).CancelRequest", Method, 1}, + {"(*Transport).Clone", Method, 13}, + {"(*Transport).CloseIdleConnections", Method, 0}, + {"(*Transport).RegisterProtocol", Method, 0}, + {"(*Transport).RoundTrip", Method, 0}, + {"(ConnState).String", Method, 3}, + {"(Dir).Open", Method, 0}, + {"(HandlerFunc).ServeHTTP", Method, 0}, + {"(Header).Add", Method, 0}, + {"(Header).Clone", Method, 13}, + {"(Header).Del", Method, 0}, + {"(Header).Get", Method, 0}, + {"(Header).Set", Method, 0}, + {"(Header).Values", Method, 14}, + {"(Header).Write", Method, 0}, + {"(Header).WriteSubset", Method, 0}, + {"AllowQuerySemicolons", Func, 17}, + {"CanonicalHeaderKey", Func, 0}, + {"Client", Type, 0}, + {"Client.CheckRedirect", Field, 0}, + {"Client.Jar", Field, 0}, + {"Client.Timeout", Field, 3}, + {"Client.Transport", Field, 0}, + {"CloseNotifier", Type, 1}, + {"ConnState", Type, 3}, + {"Cookie", Type, 0}, + {"Cookie.Domain", Field, 0}, + {"Cookie.Expires", Field, 0}, + {"Cookie.HttpOnly", Field, 0}, + {"Cookie.MaxAge", Field, 0}, + {"Cookie.Name", Field, 0}, + {"Cookie.Path", Field, 0}, + {"Cookie.Raw", Field, 0}, + {"Cookie.RawExpires", Field, 0}, + {"Cookie.SameSite", Field, 11}, + {"Cookie.Secure", Field, 0}, + {"Cookie.Unparsed", Field, 0}, + {"Cookie.Value", Field, 0}, + {"CookieJar", Type, 0}, + {"DefaultClient", Var, 0}, + {"DefaultMaxHeaderBytes", Const, 0}, + 
{"DefaultMaxIdleConnsPerHost", Const, 0}, + {"DefaultServeMux", Var, 0}, + {"DefaultTransport", Var, 0}, + {"DetectContentType", Func, 0}, + {"Dir", Type, 0}, + {"ErrAbortHandler", Var, 8}, + {"ErrBodyNotAllowed", Var, 0}, + {"ErrBodyReadAfterClose", Var, 0}, + {"ErrContentLength", Var, 0}, + {"ErrHandlerTimeout", Var, 0}, + {"ErrHeaderTooLong", Var, 0}, + {"ErrHijacked", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrMissingBoundary", Var, 0}, + {"ErrMissingContentLength", Var, 0}, + {"ErrMissingFile", Var, 0}, + {"ErrNoCookie", Var, 0}, + {"ErrNoLocation", Var, 0}, + {"ErrNotMultipart", Var, 0}, + {"ErrNotSupported", Var, 0}, + {"ErrSchemeMismatch", Var, 21}, + {"ErrServerClosed", Var, 8}, + {"ErrShortBody", Var, 0}, + {"ErrSkipAltProtocol", Var, 6}, + {"ErrUnexpectedTrailer", Var, 0}, + {"ErrUseLastResponse", Var, 7}, + {"ErrWriteAfterFlush", Var, 0}, + {"Error", Func, 0}, + {"FS", Func, 16}, + {"File", Type, 0}, + {"FileServer", Func, 0}, + {"FileServerFS", Func, 22}, + {"FileSystem", Type, 0}, + {"Flusher", Type, 0}, + {"Get", Func, 0}, + {"Handle", Func, 0}, + {"HandleFunc", Func, 0}, + {"Handler", Type, 0}, + {"HandlerFunc", Type, 0}, + {"Head", Func, 0}, + {"Header", Type, 0}, + {"Hijacker", Type, 0}, + {"ListenAndServe", Func, 0}, + {"ListenAndServeTLS", Func, 0}, + {"LocalAddrContextKey", Var, 7}, + {"MaxBytesError", Type, 19}, + {"MaxBytesError.Limit", Field, 19}, + {"MaxBytesHandler", Func, 18}, + {"MaxBytesReader", Func, 0}, + {"MethodConnect", Const, 6}, + {"MethodDelete", Const, 6}, + {"MethodGet", Const, 6}, + {"MethodHead", Const, 6}, + {"MethodOptions", Const, 6}, + {"MethodPatch", Const, 6}, + {"MethodPost", Const, 6}, + {"MethodPut", Const, 6}, + {"MethodTrace", Const, 6}, + {"NewFileTransport", Func, 0}, + {"NewFileTransportFS", Func, 22}, + {"NewRequest", Func, 0}, + {"NewRequestWithContext", Func, 13}, + {"NewResponseController", Func, 20}, + {"NewServeMux", Func, 0}, + {"NoBody", Var, 8}, + {"NotFound", Func, 0}, + {"NotFoundHandler", Func, 0}, + {"ParseHTTPVersion", Func, 0}, + {"ParseTime", Func, 1}, + {"Post", Func, 0}, + {"PostForm", Func, 0}, + {"ProtocolError", Type, 0}, + {"ProtocolError.ErrorString", Field, 0}, + {"ProxyFromEnvironment", Func, 0}, + {"ProxyURL", Func, 0}, + {"PushOptions", Type, 8}, + {"PushOptions.Header", Field, 8}, + {"PushOptions.Method", Field, 8}, + {"Pusher", Type, 8}, + {"ReadRequest", Func, 0}, + {"ReadResponse", Func, 0}, + {"Redirect", Func, 0}, + {"RedirectHandler", Func, 0}, + {"Request", Type, 0}, + {"Request.Body", Field, 0}, + {"Request.Cancel", Field, 5}, + {"Request.Close", Field, 0}, + {"Request.ContentLength", Field, 0}, + {"Request.Form", Field, 0}, + {"Request.GetBody", Field, 8}, + {"Request.Header", Field, 0}, + {"Request.Host", Field, 0}, + {"Request.Method", Field, 0}, + {"Request.MultipartForm", Field, 0}, + {"Request.PostForm", Field, 1}, + {"Request.Proto", Field, 0}, + {"Request.ProtoMajor", Field, 0}, + {"Request.ProtoMinor", Field, 0}, + {"Request.RemoteAddr", Field, 0}, + {"Request.RequestURI", Field, 0}, + {"Request.Response", Field, 7}, + {"Request.TLS", Field, 0}, + {"Request.Trailer", Field, 0}, + {"Request.TransferEncoding", Field, 0}, + {"Request.URL", Field, 0}, + {"Response", Type, 0}, + {"Response.Body", Field, 0}, + {"Response.Close", Field, 0}, + {"Response.ContentLength", Field, 0}, + {"Response.Header", Field, 0}, + {"Response.Proto", Field, 0}, + {"Response.ProtoMajor", Field, 0}, + {"Response.ProtoMinor", Field, 0}, + {"Response.Request", Field, 0}, + {"Response.Status", Field, 0}, + 
{"Response.StatusCode", Field, 0}, + {"Response.TLS", Field, 3}, + {"Response.Trailer", Field, 0}, + {"Response.TransferEncoding", Field, 0}, + {"Response.Uncompressed", Field, 7}, + {"ResponseController", Type, 20}, + {"ResponseWriter", Type, 0}, + {"RoundTripper", Type, 0}, + {"SameSite", Type, 11}, + {"SameSiteDefaultMode", Const, 11}, + {"SameSiteLaxMode", Const, 11}, + {"SameSiteNoneMode", Const, 13}, + {"SameSiteStrictMode", Const, 11}, + {"Serve", Func, 0}, + {"ServeContent", Func, 0}, + {"ServeFile", Func, 0}, + {"ServeFileFS", Func, 22}, + {"ServeMux", Type, 0}, + {"ServeTLS", Func, 9}, + {"Server", Type, 0}, + {"Server.Addr", Field, 0}, + {"Server.BaseContext", Field, 13}, + {"Server.ConnContext", Field, 13}, + {"Server.ConnState", Field, 3}, + {"Server.DisableGeneralOptionsHandler", Field, 20}, + {"Server.ErrorLog", Field, 3}, + {"Server.Handler", Field, 0}, + {"Server.IdleTimeout", Field, 8}, + {"Server.MaxHeaderBytes", Field, 0}, + {"Server.ReadHeaderTimeout", Field, 8}, + {"Server.ReadTimeout", Field, 0}, + {"Server.TLSConfig", Field, 0}, + {"Server.TLSNextProto", Field, 1}, + {"Server.WriteTimeout", Field, 0}, + {"ServerContextKey", Var, 7}, + {"SetCookie", Func, 0}, + {"StateActive", Const, 3}, + {"StateClosed", Const, 3}, + {"StateHijacked", Const, 3}, + {"StateIdle", Const, 3}, + {"StateNew", Const, 3}, + {"StatusAccepted", Const, 0}, + {"StatusAlreadyReported", Const, 7}, + {"StatusBadGateway", Const, 0}, + {"StatusBadRequest", Const, 0}, + {"StatusConflict", Const, 0}, + {"StatusContinue", Const, 0}, + {"StatusCreated", Const, 0}, + {"StatusEarlyHints", Const, 13}, + {"StatusExpectationFailed", Const, 0}, + {"StatusFailedDependency", Const, 7}, + {"StatusForbidden", Const, 0}, + {"StatusFound", Const, 0}, + {"StatusGatewayTimeout", Const, 0}, + {"StatusGone", Const, 0}, + {"StatusHTTPVersionNotSupported", Const, 0}, + {"StatusIMUsed", Const, 7}, + {"StatusInsufficientStorage", Const, 7}, + {"StatusInternalServerError", Const, 0}, + {"StatusLengthRequired", Const, 0}, + {"StatusLocked", Const, 7}, + {"StatusLoopDetected", Const, 7}, + {"StatusMethodNotAllowed", Const, 0}, + {"StatusMisdirectedRequest", Const, 11}, + {"StatusMovedPermanently", Const, 0}, + {"StatusMultiStatus", Const, 7}, + {"StatusMultipleChoices", Const, 0}, + {"StatusNetworkAuthenticationRequired", Const, 6}, + {"StatusNoContent", Const, 0}, + {"StatusNonAuthoritativeInfo", Const, 0}, + {"StatusNotAcceptable", Const, 0}, + {"StatusNotExtended", Const, 7}, + {"StatusNotFound", Const, 0}, + {"StatusNotImplemented", Const, 0}, + {"StatusNotModified", Const, 0}, + {"StatusOK", Const, 0}, + {"StatusPartialContent", Const, 0}, + {"StatusPaymentRequired", Const, 0}, + {"StatusPermanentRedirect", Const, 7}, + {"StatusPreconditionFailed", Const, 0}, + {"StatusPreconditionRequired", Const, 6}, + {"StatusProcessing", Const, 7}, + {"StatusProxyAuthRequired", Const, 0}, + {"StatusRequestEntityTooLarge", Const, 0}, + {"StatusRequestHeaderFieldsTooLarge", Const, 6}, + {"StatusRequestTimeout", Const, 0}, + {"StatusRequestURITooLong", Const, 0}, + {"StatusRequestedRangeNotSatisfiable", Const, 0}, + {"StatusResetContent", Const, 0}, + {"StatusSeeOther", Const, 0}, + {"StatusServiceUnavailable", Const, 0}, + {"StatusSwitchingProtocols", Const, 0}, + {"StatusTeapot", Const, 0}, + {"StatusTemporaryRedirect", Const, 0}, + {"StatusText", Func, 0}, + {"StatusTooEarly", Const, 12}, + {"StatusTooManyRequests", Const, 6}, + {"StatusUnauthorized", Const, 0}, + {"StatusUnavailableForLegalReasons", Const, 6}, + 
{"StatusUnprocessableEntity", Const, 7}, + {"StatusUnsupportedMediaType", Const, 0}, + {"StatusUpgradeRequired", Const, 7}, + {"StatusUseProxy", Const, 0}, + {"StatusVariantAlsoNegotiates", Const, 7}, + {"StripPrefix", Func, 0}, + {"TimeFormat", Const, 0}, + {"TimeoutHandler", Func, 0}, + {"TrailerPrefix", Const, 8}, + {"Transport", Type, 0}, + {"Transport.Dial", Field, 0}, + {"Transport.DialContext", Field, 7}, + {"Transport.DialTLS", Field, 4}, + {"Transport.DialTLSContext", Field, 14}, + {"Transport.DisableCompression", Field, 0}, + {"Transport.DisableKeepAlives", Field, 0}, + {"Transport.ExpectContinueTimeout", Field, 6}, + {"Transport.ForceAttemptHTTP2", Field, 13}, + {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.IdleConnTimeout", Field, 7}, + {"Transport.MaxConnsPerHost", Field, 11}, + {"Transport.MaxIdleConns", Field, 7}, + {"Transport.MaxIdleConnsPerHost", Field, 0}, + {"Transport.MaxResponseHeaderBytes", Field, 7}, + {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Proxy", Field, 0}, + {"Transport.ProxyConnectHeader", Field, 8}, + {"Transport.ReadBufferSize", Field, 13}, + {"Transport.ResponseHeaderTimeout", Field, 1}, + {"Transport.TLSClientConfig", Field, 0}, + {"Transport.TLSHandshakeTimeout", Field, 3}, + {"Transport.TLSNextProto", Field, 6}, + {"Transport.WriteBufferSize", Field, 13}, + }, + "net/http/cgi": { + {"(*Handler).ServeHTTP", Method, 0}, + {"Handler", Type, 0}, + {"Handler.Args", Field, 0}, + {"Handler.Dir", Field, 0}, + {"Handler.Env", Field, 0}, + {"Handler.InheritEnv", Field, 0}, + {"Handler.Logger", Field, 0}, + {"Handler.Path", Field, 0}, + {"Handler.PathLocationHandler", Field, 0}, + {"Handler.Root", Field, 0}, + {"Handler.Stderr", Field, 7}, + {"Request", Func, 0}, + {"RequestFromMap", Func, 0}, + {"Serve", Func, 0}, + }, + "net/http/cookiejar": { + {"(*Jar).Cookies", Method, 1}, + {"(*Jar).SetCookies", Method, 1}, + {"Jar", Type, 1}, + {"New", Func, 1}, + {"Options", Type, 1}, + {"Options.PublicSuffixList", Field, 1}, + {"PublicSuffixList", Type, 1}, + }, + "net/http/fcgi": { + {"ErrConnClosed", Var, 5}, + {"ErrRequestAborted", Var, 5}, + {"ProcessEnv", Func, 9}, + {"Serve", Func, 0}, + }, + "net/http/httptest": { + {"(*ResponseRecorder).Flush", Method, 0}, + {"(*ResponseRecorder).Header", Method, 0}, + {"(*ResponseRecorder).Result", Method, 7}, + {"(*ResponseRecorder).Write", Method, 0}, + {"(*ResponseRecorder).WriteHeader", Method, 0}, + {"(*ResponseRecorder).WriteString", Method, 6}, + {"(*Server).Certificate", Method, 9}, + {"(*Server).Client", Method, 9}, + {"(*Server).Close", Method, 0}, + {"(*Server).CloseClientConnections", Method, 0}, + {"(*Server).Start", Method, 0}, + {"(*Server).StartTLS", Method, 0}, + {"DefaultRemoteAddr", Const, 0}, + {"NewRecorder", Func, 0}, + {"NewRequest", Func, 7}, + {"NewServer", Func, 0}, + {"NewTLSServer", Func, 0}, + {"NewUnstartedServer", Func, 0}, + {"ResponseRecorder", Type, 0}, + {"ResponseRecorder.Body", Field, 0}, + {"ResponseRecorder.Code", Field, 0}, + {"ResponseRecorder.Flushed", Field, 0}, + {"ResponseRecorder.HeaderMap", Field, 0}, + {"Server", Type, 0}, + {"Server.Config", Field, 0}, + {"Server.EnableHTTP2", Field, 14}, + {"Server.Listener", Field, 0}, + {"Server.TLS", Field, 0}, + {"Server.URL", Field, 0}, + }, + "net/http/httptrace": { + {"ClientTrace", Type, 7}, + {"ClientTrace.ConnectDone", Field, 7}, + {"ClientTrace.ConnectStart", Field, 7}, + {"ClientTrace.DNSDone", Field, 7}, + {"ClientTrace.DNSStart", Field, 7}, + {"ClientTrace.GetConn", Field, 7}, + 
{"ClientTrace.Got100Continue", Field, 7}, + {"ClientTrace.Got1xxResponse", Field, 11}, + {"ClientTrace.GotConn", Field, 7}, + {"ClientTrace.GotFirstResponseByte", Field, 7}, + {"ClientTrace.PutIdleConn", Field, 7}, + {"ClientTrace.TLSHandshakeDone", Field, 8}, + {"ClientTrace.TLSHandshakeStart", Field, 8}, + {"ClientTrace.Wait100Continue", Field, 7}, + {"ClientTrace.WroteHeaderField", Field, 11}, + {"ClientTrace.WroteHeaders", Field, 7}, + {"ClientTrace.WroteRequest", Field, 7}, + {"ContextClientTrace", Func, 7}, + {"DNSDoneInfo", Type, 7}, + {"DNSDoneInfo.Addrs", Field, 7}, + {"DNSDoneInfo.Coalesced", Field, 7}, + {"DNSDoneInfo.Err", Field, 7}, + {"DNSStartInfo", Type, 7}, + {"DNSStartInfo.Host", Field, 7}, + {"GotConnInfo", Type, 7}, + {"GotConnInfo.Conn", Field, 7}, + {"GotConnInfo.IdleTime", Field, 7}, + {"GotConnInfo.Reused", Field, 7}, + {"GotConnInfo.WasIdle", Field, 7}, + {"WithClientTrace", Func, 7}, + {"WroteRequestInfo", Type, 7}, + {"WroteRequestInfo.Err", Field, 7}, + }, + "net/http/httputil": { + {"(*ClientConn).Close", Method, 0}, + {"(*ClientConn).Do", Method, 0}, + {"(*ClientConn).Hijack", Method, 0}, + {"(*ClientConn).Pending", Method, 0}, + {"(*ClientConn).Read", Method, 0}, + {"(*ClientConn).Write", Method, 0}, + {"(*ProxyRequest).SetURL", Method, 20}, + {"(*ProxyRequest).SetXForwarded", Method, 20}, + {"(*ReverseProxy).ServeHTTP", Method, 0}, + {"(*ServerConn).Close", Method, 0}, + {"(*ServerConn).Hijack", Method, 0}, + {"(*ServerConn).Pending", Method, 0}, + {"(*ServerConn).Read", Method, 0}, + {"(*ServerConn).Write", Method, 0}, + {"BufferPool", Type, 6}, + {"ClientConn", Type, 0}, + {"DumpRequest", Func, 0}, + {"DumpRequestOut", Func, 0}, + {"DumpResponse", Func, 0}, + {"ErrClosed", Var, 0}, + {"ErrLineTooLong", Var, 0}, + {"ErrPersistEOF", Var, 0}, + {"ErrPipeline", Var, 0}, + {"NewChunkedReader", Func, 0}, + {"NewChunkedWriter", Func, 0}, + {"NewClientConn", Func, 0}, + {"NewProxyClientConn", Func, 0}, + {"NewServerConn", Func, 0}, + {"NewSingleHostReverseProxy", Func, 0}, + {"ProxyRequest", Type, 20}, + {"ProxyRequest.In", Field, 20}, + {"ProxyRequest.Out", Field, 20}, + {"ReverseProxy", Type, 0}, + {"ReverseProxy.BufferPool", Field, 6}, + {"ReverseProxy.Director", Field, 0}, + {"ReverseProxy.ErrorHandler", Field, 11}, + {"ReverseProxy.ErrorLog", Field, 4}, + {"ReverseProxy.FlushInterval", Field, 0}, + {"ReverseProxy.ModifyResponse", Field, 8}, + {"ReverseProxy.Rewrite", Field, 20}, + {"ReverseProxy.Transport", Field, 0}, + {"ServerConn", Type, 0}, + }, + "net/http/pprof": { + {"Cmdline", Func, 0}, + {"Handler", Func, 0}, + {"Index", Func, 0}, + {"Profile", Func, 0}, + {"Symbol", Func, 0}, + {"Trace", Func, 5}, + }, + "net/mail": { + {"(*Address).String", Method, 0}, + {"(*AddressParser).Parse", Method, 5}, + {"(*AddressParser).ParseList", Method, 5}, + {"(Header).AddressList", Method, 0}, + {"(Header).Date", Method, 0}, + {"(Header).Get", Method, 0}, + {"Address", Type, 0}, + {"Address.Address", Field, 0}, + {"Address.Name", Field, 0}, + {"AddressParser", Type, 5}, + {"AddressParser.WordDecoder", Field, 5}, + {"ErrHeaderNotPresent", Var, 0}, + {"Header", Type, 0}, + {"Message", Type, 0}, + {"Message.Body", Field, 0}, + {"Message.Header", Field, 0}, + {"ParseAddress", Func, 1}, + {"ParseAddressList", Func, 1}, + {"ParseDate", Func, 8}, + {"ReadMessage", Func, 0}, + }, + "net/netip": { + {"(*Addr).UnmarshalBinary", Method, 18}, + {"(*Addr).UnmarshalText", Method, 18}, + {"(*AddrPort).UnmarshalBinary", Method, 18}, + {"(*AddrPort).UnmarshalText", Method, 18}, + 
{"(*Prefix).UnmarshalBinary", Method, 18}, + {"(*Prefix).UnmarshalText", Method, 18}, + {"(Addr).AppendTo", Method, 18}, + {"(Addr).As16", Method, 18}, + {"(Addr).As4", Method, 18}, + {"(Addr).AsSlice", Method, 18}, + {"(Addr).BitLen", Method, 18}, + {"(Addr).Compare", Method, 18}, + {"(Addr).Is4", Method, 18}, + {"(Addr).Is4In6", Method, 18}, + {"(Addr).Is6", Method, 18}, + {"(Addr).IsGlobalUnicast", Method, 18}, + {"(Addr).IsInterfaceLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalMulticast", Method, 18}, + {"(Addr).IsLinkLocalUnicast", Method, 18}, + {"(Addr).IsLoopback", Method, 18}, + {"(Addr).IsMulticast", Method, 18}, + {"(Addr).IsPrivate", Method, 18}, + {"(Addr).IsUnspecified", Method, 18}, + {"(Addr).IsValid", Method, 18}, + {"(Addr).Less", Method, 18}, + {"(Addr).MarshalBinary", Method, 18}, + {"(Addr).MarshalText", Method, 18}, + {"(Addr).Next", Method, 18}, + {"(Addr).Prefix", Method, 18}, + {"(Addr).Prev", Method, 18}, + {"(Addr).String", Method, 18}, + {"(Addr).StringExpanded", Method, 18}, + {"(Addr).Unmap", Method, 18}, + {"(Addr).WithZone", Method, 18}, + {"(Addr).Zone", Method, 18}, + {"(AddrPort).Addr", Method, 18}, + {"(AddrPort).AppendTo", Method, 18}, + {"(AddrPort).Compare", Method, 22}, + {"(AddrPort).IsValid", Method, 18}, + {"(AddrPort).MarshalBinary", Method, 18}, + {"(AddrPort).MarshalText", Method, 18}, + {"(AddrPort).Port", Method, 18}, + {"(AddrPort).String", Method, 18}, + {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendTo", Method, 18}, + {"(Prefix).Bits", Method, 18}, + {"(Prefix).Contains", Method, 18}, + {"(Prefix).IsSingleIP", Method, 18}, + {"(Prefix).IsValid", Method, 18}, + {"(Prefix).MarshalBinary", Method, 18}, + {"(Prefix).MarshalText", Method, 18}, + {"(Prefix).Masked", Method, 18}, + {"(Prefix).Overlaps", Method, 18}, + {"(Prefix).String", Method, 18}, + {"Addr", Type, 18}, + {"AddrFrom16", Func, 18}, + {"AddrFrom4", Func, 18}, + {"AddrFromSlice", Func, 18}, + {"AddrPort", Type, 18}, + {"AddrPortFrom", Func, 18}, + {"IPv4Unspecified", Func, 18}, + {"IPv6LinkLocalAllNodes", Func, 18}, + {"IPv6LinkLocalAllRouters", Func, 20}, + {"IPv6Loopback", Func, 20}, + {"IPv6Unspecified", Func, 18}, + {"MustParseAddr", Func, 18}, + {"MustParseAddrPort", Func, 18}, + {"MustParsePrefix", Func, 18}, + {"ParseAddr", Func, 18}, + {"ParseAddrPort", Func, 18}, + {"ParsePrefix", Func, 18}, + {"Prefix", Type, 18}, + {"PrefixFrom", Func, 18}, + }, + "net/rpc": { + {"(*Client).Call", Method, 0}, + {"(*Client).Close", Method, 0}, + {"(*Client).Go", Method, 0}, + {"(*Server).Accept", Method, 0}, + {"(*Server).HandleHTTP", Method, 0}, + {"(*Server).Register", Method, 0}, + {"(*Server).RegisterName", Method, 0}, + {"(*Server).ServeCodec", Method, 0}, + {"(*Server).ServeConn", Method, 0}, + {"(*Server).ServeHTTP", Method, 0}, + {"(*Server).ServeRequest", Method, 0}, + {"(ServerError).Error", Method, 0}, + {"Accept", Func, 0}, + {"Call", Type, 0}, + {"Call.Args", Field, 0}, + {"Call.Done", Field, 0}, + {"Call.Error", Field, 0}, + {"Call.Reply", Field, 0}, + {"Call.ServiceMethod", Field, 0}, + {"Client", Type, 0}, + {"ClientCodec", Type, 0}, + {"DefaultDebugPath", Const, 0}, + {"DefaultRPCPath", Const, 0}, + {"DefaultServer", Var, 0}, + {"Dial", Func, 0}, + {"DialHTTP", Func, 0}, + {"DialHTTPPath", Func, 0}, + {"ErrShutdown", Var, 0}, + {"HandleHTTP", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientWithCodec", Func, 0}, + {"NewServer", Func, 0}, + {"Register", Func, 0}, + {"RegisterName", Func, 0}, + {"Request", Type, 0}, + {"Request.Seq", Field, 0}, + 
{"Request.ServiceMethod", Field, 0}, + {"Response", Type, 0}, + {"Response.Error", Field, 0}, + {"Response.Seq", Field, 0}, + {"Response.ServiceMethod", Field, 0}, + {"ServeCodec", Func, 0}, + {"ServeConn", Func, 0}, + {"ServeRequest", Func, 0}, + {"Server", Type, 0}, + {"ServerCodec", Type, 0}, + {"ServerError", Type, 0}, + }, + "net/rpc/jsonrpc": { + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"NewClientCodec", Func, 0}, + {"NewServerCodec", Func, 0}, + {"ServeConn", Func, 0}, + }, + "net/smtp": { + {"(*Client).Auth", Method, 0}, + {"(*Client).Close", Method, 2}, + {"(*Client).Data", Method, 0}, + {"(*Client).Extension", Method, 0}, + {"(*Client).Hello", Method, 1}, + {"(*Client).Mail", Method, 0}, + {"(*Client).Noop", Method, 10}, + {"(*Client).Quit", Method, 0}, + {"(*Client).Rcpt", Method, 0}, + {"(*Client).Reset", Method, 0}, + {"(*Client).StartTLS", Method, 0}, + {"(*Client).TLSConnectionState", Method, 5}, + {"(*Client).Verify", Method, 0}, + {"Auth", Type, 0}, + {"CRAMMD5Auth", Func, 0}, + {"Client", Type, 0}, + {"Client.Text", Field, 0}, + {"Dial", Func, 0}, + {"NewClient", Func, 0}, + {"PlainAuth", Func, 0}, + {"SendMail", Func, 0}, + {"ServerInfo", Type, 0}, + {"ServerInfo.Auth", Field, 0}, + {"ServerInfo.Name", Field, 0}, + {"ServerInfo.TLS", Field, 0}, + }, + "net/textproto": { + {"(*Conn).Close", Method, 0}, + {"(*Conn).Cmd", Method, 0}, + {"(*Conn).DotReader", Method, 0}, + {"(*Conn).DotWriter", Method, 0}, + {"(*Conn).EndRequest", Method, 0}, + {"(*Conn).EndResponse", Method, 0}, + {"(*Conn).Next", Method, 0}, + {"(*Conn).PrintfLine", Method, 0}, + {"(*Conn).ReadCodeLine", Method, 0}, + {"(*Conn).ReadContinuedLine", Method, 0}, + {"(*Conn).ReadContinuedLineBytes", Method, 0}, + {"(*Conn).ReadDotBytes", Method, 0}, + {"(*Conn).ReadDotLines", Method, 0}, + {"(*Conn).ReadLine", Method, 0}, + {"(*Conn).ReadLineBytes", Method, 0}, + {"(*Conn).ReadMIMEHeader", Method, 0}, + {"(*Conn).ReadResponse", Method, 0}, + {"(*Conn).StartRequest", Method, 0}, + {"(*Conn).StartResponse", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Pipeline).EndRequest", Method, 0}, + {"(*Pipeline).EndResponse", Method, 0}, + {"(*Pipeline).Next", Method, 0}, + {"(*Pipeline).StartRequest", Method, 0}, + {"(*Pipeline).StartResponse", Method, 0}, + {"(*Reader).DotReader", Method, 0}, + {"(*Reader).ReadCodeLine", Method, 0}, + {"(*Reader).ReadContinuedLine", Method, 0}, + {"(*Reader).ReadContinuedLineBytes", Method, 0}, + {"(*Reader).ReadDotBytes", Method, 0}, + {"(*Reader).ReadDotLines", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadLineBytes", Method, 0}, + {"(*Reader).ReadMIMEHeader", Method, 0}, + {"(*Reader).ReadResponse", Method, 0}, + {"(*Writer).DotWriter", Method, 0}, + {"(*Writer).PrintfLine", Method, 0}, + {"(MIMEHeader).Add", Method, 0}, + {"(MIMEHeader).Del", Method, 0}, + {"(MIMEHeader).Get", Method, 0}, + {"(MIMEHeader).Set", Method, 0}, + {"(MIMEHeader).Values", Method, 14}, + {"(ProtocolError).Error", Method, 0}, + {"CanonicalMIMEHeaderKey", Func, 0}, + {"Conn", Type, 0}, + {"Conn.Pipeline", Field, 0}, + {"Conn.Reader", Field, 0}, + {"Conn.Writer", Field, 0}, + {"Dial", Func, 0}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Msg", Field, 0}, + {"MIMEHeader", Type, 0}, + {"NewConn", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Pipeline", Type, 0}, + {"ProtocolError", Type, 0}, + {"Reader", Type, 0}, + {"Reader.R", Field, 0}, + {"TrimBytes", Func, 1}, + {"TrimString", Func, 1}, + {"Writer", Type, 0}, + {"Writer.W", Field, 0}, + }, 
+ "net/url": { + {"(*Error).Error", Method, 0}, + {"(*Error).Temporary", Method, 6}, + {"(*Error).Timeout", Method, 6}, + {"(*Error).Unwrap", Method, 13}, + {"(*URL).EscapedFragment", Method, 15}, + {"(*URL).EscapedPath", Method, 5}, + {"(*URL).Hostname", Method, 8}, + {"(*URL).IsAbs", Method, 0}, + {"(*URL).JoinPath", Method, 19}, + {"(*URL).MarshalBinary", Method, 8}, + {"(*URL).Parse", Method, 0}, + {"(*URL).Port", Method, 8}, + {"(*URL).Query", Method, 0}, + {"(*URL).Redacted", Method, 15}, + {"(*URL).RequestURI", Method, 0}, + {"(*URL).ResolveReference", Method, 0}, + {"(*URL).String", Method, 0}, + {"(*URL).UnmarshalBinary", Method, 8}, + {"(*Userinfo).Password", Method, 0}, + {"(*Userinfo).String", Method, 0}, + {"(*Userinfo).Username", Method, 0}, + {"(EscapeError).Error", Method, 0}, + {"(InvalidHostError).Error", Method, 6}, + {"(Values).Add", Method, 0}, + {"(Values).Del", Method, 0}, + {"(Values).Encode", Method, 0}, + {"(Values).Get", Method, 0}, + {"(Values).Has", Method, 17}, + {"(Values).Set", Method, 0}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Op", Field, 0}, + {"Error.URL", Field, 0}, + {"EscapeError", Type, 0}, + {"InvalidHostError", Type, 6}, + {"JoinPath", Func, 19}, + {"Parse", Func, 0}, + {"ParseQuery", Func, 0}, + {"ParseRequestURI", Func, 0}, + {"PathEscape", Func, 8}, + {"PathUnescape", Func, 8}, + {"QueryEscape", Func, 0}, + {"QueryUnescape", Func, 0}, + {"URL", Type, 0}, + {"URL.ForceQuery", Field, 7}, + {"URL.Fragment", Field, 0}, + {"URL.Host", Field, 0}, + {"URL.OmitHost", Field, 19}, + {"URL.Opaque", Field, 0}, + {"URL.Path", Field, 0}, + {"URL.RawFragment", Field, 15}, + {"URL.RawPath", Field, 5}, + {"URL.RawQuery", Field, 0}, + {"URL.Scheme", Field, 0}, + {"URL.User", Field, 0}, + {"User", Func, 0}, + {"UserPassword", Func, 0}, + {"Userinfo", Type, 0}, + {"Values", Type, 0}, + }, + "os": { + {"(*File).Chdir", Method, 0}, + {"(*File).Chmod", Method, 0}, + {"(*File).Chown", Method, 0}, + {"(*File).Close", Method, 0}, + {"(*File).Fd", Method, 0}, + {"(*File).Name", Method, 0}, + {"(*File).Read", Method, 0}, + {"(*File).ReadAt", Method, 0}, + {"(*File).ReadDir", Method, 16}, + {"(*File).ReadFrom", Method, 15}, + {"(*File).Readdir", Method, 0}, + {"(*File).Readdirnames", Method, 0}, + {"(*File).Seek", Method, 0}, + {"(*File).SetDeadline", Method, 10}, + {"(*File).SetReadDeadline", Method, 10}, + {"(*File).SetWriteDeadline", Method, 10}, + {"(*File).Stat", Method, 0}, + {"(*File).Sync", Method, 0}, + {"(*File).SyscallConn", Method, 12}, + {"(*File).Truncate", Method, 0}, + {"(*File).Write", Method, 0}, + {"(*File).WriteAt", Method, 0}, + {"(*File).WriteString", Method, 0}, + {"(*File).WriteTo", Method, 22}, + {"(*LinkError).Error", Method, 0}, + {"(*LinkError).Unwrap", Method, 13}, + {"(*PathError).Error", Method, 0}, + {"(*PathError).Timeout", Method, 10}, + {"(*PathError).Unwrap", Method, 13}, + {"(*Process).Kill", Method, 0}, + {"(*Process).Release", Method, 0}, + {"(*Process).Signal", Method, 0}, + {"(*Process).Wait", Method, 0}, + {"(*ProcessState).ExitCode", Method, 12}, + {"(*ProcessState).Exited", Method, 0}, + {"(*ProcessState).Pid", Method, 0}, + {"(*ProcessState).String", Method, 0}, + {"(*ProcessState).Success", Method, 0}, + {"(*ProcessState).Sys", Method, 0}, + {"(*ProcessState).SysUsage", Method, 0}, + {"(*ProcessState).SystemTime", Method, 0}, + {"(*ProcessState).UserTime", Method, 0}, + {"(*SyscallError).Error", Method, 0}, + {"(*SyscallError).Timeout", Method, 10}, + {"(*SyscallError).Unwrap", Method, 13}, + 
{"(FileMode).IsDir", Method, 0}, + {"(FileMode).IsRegular", Method, 1}, + {"(FileMode).Perm", Method, 0}, + {"(FileMode).String", Method, 0}, + {"Args", Var, 0}, + {"Chdir", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chtimes", Func, 0}, + {"Clearenv", Func, 0}, + {"Create", Func, 0}, + {"CreateTemp", Func, 16}, + {"DevNull", Const, 0}, + {"DirEntry", Type, 16}, + {"DirFS", Func, 16}, + {"Environ", Func, 0}, + {"ErrClosed", Var, 8}, + {"ErrDeadlineExceeded", Var, 15}, + {"ErrExist", Var, 0}, + {"ErrInvalid", Var, 0}, + {"ErrNoDeadline", Var, 10}, + {"ErrNotExist", Var, 0}, + {"ErrPermission", Var, 0}, + {"ErrProcessDone", Var, 16}, + {"Executable", Func, 8}, + {"Exit", Func, 0}, + {"Expand", Func, 0}, + {"ExpandEnv", Func, 0}, + {"File", Type, 0}, + {"FileInfo", Type, 0}, + {"FileMode", Type, 0}, + {"FindProcess", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Hostname", Func, 0}, + {"Interrupt", Var, 0}, + {"IsExist", Func, 0}, + {"IsNotExist", Func, 0}, + {"IsPathSeparator", Func, 0}, + {"IsPermission", Func, 0}, + {"IsTimeout", Func, 10}, + {"Kill", Var, 0}, + {"Lchown", Func, 0}, + {"Link", Func, 0}, + {"LinkError", Type, 0}, + {"LinkError.Err", Field, 0}, + {"LinkError.New", Field, 0}, + {"LinkError.Old", Field, 0}, + {"LinkError.Op", Field, 0}, + {"LookupEnv", Func, 5}, + {"Lstat", Func, 0}, + {"Mkdir", Func, 0}, + {"MkdirAll", Func, 0}, + {"MkdirTemp", Func, 16}, + {"ModeAppend", Const, 0}, + {"ModeCharDevice", Const, 0}, + {"ModeDevice", Const, 0}, + {"ModeDir", Const, 0}, + {"ModeExclusive", Const, 0}, + {"ModeIrregular", Const, 11}, + {"ModeNamedPipe", Const, 0}, + {"ModePerm", Const, 0}, + {"ModeSetgid", Const, 0}, + {"ModeSetuid", Const, 0}, + {"ModeSocket", Const, 0}, + {"ModeSticky", Const, 0}, + {"ModeSymlink", Const, 0}, + {"ModeTemporary", Const, 0}, + {"ModeType", Const, 0}, + {"NewFile", Func, 0}, + {"NewSyscallError", Func, 0}, + {"O_APPEND", Const, 0}, + {"O_CREATE", Const, 0}, + {"O_EXCL", Const, 0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenFile", Func, 0}, + {"PathError", Type, 0}, + {"PathError.Err", Field, 0}, + {"PathError.Op", Field, 0}, + {"PathError.Path", Field, 0}, + {"PathListSeparator", Const, 0}, + {"PathSeparator", Const, 0}, + {"Pipe", Func, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process", Type, 0}, + {"Process.Pid", Field, 0}, + {"ProcessState", Type, 0}, + {"ReadDir", Func, 16}, + {"ReadFile", Func, 16}, + {"Readlink", Func, 0}, + {"Remove", Func, 0}, + {"RemoveAll", Func, 0}, + {"Rename", Func, 0}, + {"SEEK_CUR", Const, 0}, + {"SEEK_END", Const, 0}, + {"SEEK_SET", Const, 0}, + {"SameFile", Func, 0}, + {"Setenv", Func, 0}, + {"Signal", Type, 0}, + {"StartProcess", Func, 0}, + {"Stat", Func, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"Symlink", Func, 0}, + {"SyscallError", Type, 0}, + {"SyscallError.Err", Field, 0}, + {"SyscallError.Syscall", Field, 0}, + {"TempDir", Func, 0}, + {"Truncate", Func, 0}, + {"Unsetenv", Func, 4}, + {"UserCacheDir", Func, 11}, + {"UserConfigDir", Func, 13}, + {"UserHomeDir", Func, 12}, + {"WriteFile", Func, 16}, + }, + "os/exec": { + 
{"(*Cmd).CombinedOutput", Method, 0}, + {"(*Cmd).Environ", Method, 19}, + {"(*Cmd).Output", Method, 0}, + {"(*Cmd).Run", Method, 0}, + {"(*Cmd).Start", Method, 0}, + {"(*Cmd).StderrPipe", Method, 0}, + {"(*Cmd).StdinPipe", Method, 0}, + {"(*Cmd).StdoutPipe", Method, 0}, + {"(*Cmd).String", Method, 13}, + {"(*Cmd).Wait", Method, 0}, + {"(*Error).Error", Method, 0}, + {"(*Error).Unwrap", Method, 13}, + {"(*ExitError).Error", Method, 0}, + {"(ExitError).ExitCode", Method, 12}, + {"(ExitError).Exited", Method, 0}, + {"(ExitError).Pid", Method, 0}, + {"(ExitError).String", Method, 0}, + {"(ExitError).Success", Method, 0}, + {"(ExitError).Sys", Method, 0}, + {"(ExitError).SysUsage", Method, 0}, + {"(ExitError).SystemTime", Method, 0}, + {"(ExitError).UserTime", Method, 0}, + {"Cmd", Type, 0}, + {"Cmd.Args", Field, 0}, + {"Cmd.Cancel", Field, 20}, + {"Cmd.Dir", Field, 0}, + {"Cmd.Env", Field, 0}, + {"Cmd.Err", Field, 19}, + {"Cmd.ExtraFiles", Field, 0}, + {"Cmd.Path", Field, 0}, + {"Cmd.Process", Field, 0}, + {"Cmd.ProcessState", Field, 0}, + {"Cmd.Stderr", Field, 0}, + {"Cmd.Stdin", Field, 0}, + {"Cmd.Stdout", Field, 0}, + {"Cmd.SysProcAttr", Field, 0}, + {"Cmd.WaitDelay", Field, 20}, + {"Command", Func, 0}, + {"CommandContext", Func, 7}, + {"ErrDot", Var, 19}, + {"ErrNotFound", Var, 0}, + {"ErrWaitDelay", Var, 20}, + {"Error", Type, 0}, + {"Error.Err", Field, 0}, + {"Error.Name", Field, 0}, + {"ExitError", Type, 0}, + {"ExitError.ProcessState", Field, 0}, + {"ExitError.Stderr", Field, 6}, + {"LookPath", Func, 0}, + }, + "os/signal": { + {"Ignore", Func, 5}, + {"Ignored", Func, 11}, + {"Notify", Func, 0}, + {"NotifyContext", Func, 16}, + {"Reset", Func, 5}, + {"Stop", Func, 1}, + }, + "os/user": { + {"(*User).GroupIds", Method, 7}, + {"(UnknownGroupError).Error", Method, 7}, + {"(UnknownGroupIdError).Error", Method, 7}, + {"(UnknownUserError).Error", Method, 0}, + {"(UnknownUserIdError).Error", Method, 0}, + {"Current", Func, 0}, + {"Group", Type, 7}, + {"Group.Gid", Field, 7}, + {"Group.Name", Field, 7}, + {"Lookup", Func, 0}, + {"LookupGroup", Func, 7}, + {"LookupGroupId", Func, 7}, + {"LookupId", Func, 0}, + {"UnknownGroupError", Type, 7}, + {"UnknownGroupIdError", Type, 7}, + {"UnknownUserError", Type, 0}, + {"UnknownUserIdError", Type, 0}, + {"User", Type, 0}, + {"User.Gid", Field, 0}, + {"User.HomeDir", Field, 0}, + {"User.Name", Field, 0}, + {"User.Uid", Field, 0}, + {"User.Username", Field, 0}, + }, + "path": { + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"Ext", Func, 0}, + {"IsAbs", Func, 0}, + {"Join", Func, 0}, + {"Match", Func, 0}, + {"Split", Func, 0}, + }, + "path/filepath": { + {"Abs", Func, 0}, + {"Base", Func, 0}, + {"Clean", Func, 0}, + {"Dir", Func, 0}, + {"ErrBadPattern", Var, 0}, + {"EvalSymlinks", Func, 0}, + {"Ext", Func, 0}, + {"FromSlash", Func, 0}, + {"Glob", Func, 0}, + {"HasPrefix", Func, 0}, + {"IsAbs", Func, 0}, + {"IsLocal", Func, 20}, + {"Join", Func, 0}, + {"ListSeparator", Const, 0}, + {"Match", Func, 0}, + {"Rel", Func, 0}, + {"Separator", Const, 0}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 0}, + {"Split", Func, 0}, + {"SplitList", Func, 0}, + {"ToSlash", Func, 0}, + {"VolumeName", Func, 0}, + {"Walk", Func, 0}, + {"WalkDir", Func, 16}, + {"WalkFunc", Type, 0}, + }, + "plugin": { + {"(*Plugin).Lookup", Method, 8}, + {"Open", Func, 8}, + {"Plugin", Type, 8}, + {"Symbol", Type, 8}, + }, + "reflect": { + {"(*MapIter).Key", Method, 12}, + {"(*MapIter).Next", Method, 12}, + {"(*MapIter).Reset", Method, 18}, + 
{"(*MapIter).Value", Method, 12}, + {"(*ValueError).Error", Method, 0}, + {"(ChanDir).String", Method, 0}, + {"(Kind).String", Method, 0}, + {"(Method).IsExported", Method, 17}, + {"(StructField).IsExported", Method, 17}, + {"(StructTag).Get", Method, 0}, + {"(StructTag).Lookup", Method, 7}, + {"(Value).Addr", Method, 0}, + {"(Value).Bool", Method, 0}, + {"(Value).Bytes", Method, 0}, + {"(Value).Call", Method, 0}, + {"(Value).CallSlice", Method, 0}, + {"(Value).CanAddr", Method, 0}, + {"(Value).CanComplex", Method, 18}, + {"(Value).CanConvert", Method, 17}, + {"(Value).CanFloat", Method, 18}, + {"(Value).CanInt", Method, 18}, + {"(Value).CanInterface", Method, 0}, + {"(Value).CanSet", Method, 0}, + {"(Value).CanUint", Method, 18}, + {"(Value).Cap", Method, 0}, + {"(Value).Clear", Method, 21}, + {"(Value).Close", Method, 0}, + {"(Value).Comparable", Method, 20}, + {"(Value).Complex", Method, 0}, + {"(Value).Convert", Method, 1}, + {"(Value).Elem", Method, 0}, + {"(Value).Equal", Method, 20}, + {"(Value).Field", Method, 0}, + {"(Value).FieldByIndex", Method, 0}, + {"(Value).FieldByIndexErr", Method, 18}, + {"(Value).FieldByName", Method, 0}, + {"(Value).FieldByNameFunc", Method, 0}, + {"(Value).Float", Method, 0}, + {"(Value).Grow", Method, 20}, + {"(Value).Index", Method, 0}, + {"(Value).Int", Method, 0}, + {"(Value).Interface", Method, 0}, + {"(Value).InterfaceData", Method, 0}, + {"(Value).IsNil", Method, 0}, + {"(Value).IsValid", Method, 0}, + {"(Value).IsZero", Method, 13}, + {"(Value).Kind", Method, 0}, + {"(Value).Len", Method, 0}, + {"(Value).MapIndex", Method, 0}, + {"(Value).MapKeys", Method, 0}, + {"(Value).MapRange", Method, 12}, + {"(Value).Method", Method, 0}, + {"(Value).MethodByName", Method, 0}, + {"(Value).NumField", Method, 0}, + {"(Value).NumMethod", Method, 0}, + {"(Value).OverflowComplex", Method, 0}, + {"(Value).OverflowFloat", Method, 0}, + {"(Value).OverflowInt", Method, 0}, + {"(Value).OverflowUint", Method, 0}, + {"(Value).Pointer", Method, 0}, + {"(Value).Recv", Method, 0}, + {"(Value).Send", Method, 0}, + {"(Value).Set", Method, 0}, + {"(Value).SetBool", Method, 0}, + {"(Value).SetBytes", Method, 0}, + {"(Value).SetCap", Method, 2}, + {"(Value).SetComplex", Method, 0}, + {"(Value).SetFloat", Method, 0}, + {"(Value).SetInt", Method, 0}, + {"(Value).SetIterKey", Method, 18}, + {"(Value).SetIterValue", Method, 18}, + {"(Value).SetLen", Method, 0}, + {"(Value).SetMapIndex", Method, 0}, + {"(Value).SetPointer", Method, 0}, + {"(Value).SetString", Method, 0}, + {"(Value).SetUint", Method, 0}, + {"(Value).SetZero", Method, 20}, + {"(Value).Slice", Method, 0}, + {"(Value).Slice3", Method, 2}, + {"(Value).String", Method, 0}, + {"(Value).TryRecv", Method, 0}, + {"(Value).TrySend", Method, 0}, + {"(Value).Type", Method, 0}, + {"(Value).Uint", Method, 0}, + {"(Value).UnsafeAddr", Method, 0}, + {"(Value).UnsafePointer", Method, 18}, + {"Append", Func, 0}, + {"AppendSlice", Func, 0}, + {"Array", Const, 0}, + {"ArrayOf", Func, 5}, + {"Bool", Const, 0}, + {"BothDir", Const, 0}, + {"Chan", Const, 0}, + {"ChanDir", Type, 0}, + {"ChanOf", Func, 1}, + {"Complex128", Const, 0}, + {"Complex64", Const, 0}, + {"Copy", Func, 0}, + {"DeepEqual", Func, 0}, + {"Float32", Const, 0}, + {"Float64", Const, 0}, + {"Func", Const, 0}, + {"FuncOf", Func, 5}, + {"Indirect", Func, 0}, + {"Int", Const, 0}, + {"Int16", Const, 0}, + {"Int32", Const, 0}, + {"Int64", Const, 0}, + {"Int8", Const, 0}, + {"Interface", Const, 0}, + {"Invalid", Const, 0}, + {"Kind", Type, 0}, + {"MakeChan", Func, 0}, + 
{"MakeFunc", Func, 1}, + {"MakeMap", Func, 0}, + {"MakeMapWithSize", Func, 9}, + {"MakeSlice", Func, 0}, + {"Map", Const, 0}, + {"MapIter", Type, 12}, + {"MapOf", Func, 1}, + {"Method", Type, 0}, + {"Method.Func", Field, 0}, + {"Method.Index", Field, 0}, + {"Method.Name", Field, 0}, + {"Method.PkgPath", Field, 0}, + {"Method.Type", Field, 0}, + {"New", Func, 0}, + {"NewAt", Func, 0}, + {"Pointer", Const, 18}, + {"PointerTo", Func, 18}, + {"Ptr", Const, 0}, + {"PtrTo", Func, 0}, + {"RecvDir", Const, 0}, + {"Select", Func, 1}, + {"SelectCase", Type, 1}, + {"SelectCase.Chan", Field, 1}, + {"SelectCase.Dir", Field, 1}, + {"SelectCase.Send", Field, 1}, + {"SelectDefault", Const, 1}, + {"SelectDir", Type, 1}, + {"SelectRecv", Const, 1}, + {"SelectSend", Const, 1}, + {"SendDir", Const, 0}, + {"Slice", Const, 0}, + {"SliceHeader", Type, 0}, + {"SliceHeader.Cap", Field, 0}, + {"SliceHeader.Data", Field, 0}, + {"SliceHeader.Len", Field, 0}, + {"SliceOf", Func, 1}, + {"String", Const, 0}, + {"StringHeader", Type, 0}, + {"StringHeader.Data", Field, 0}, + {"StringHeader.Len", Field, 0}, + {"Struct", Const, 0}, + {"StructField", Type, 0}, + {"StructField.Anonymous", Field, 0}, + {"StructField.Index", Field, 0}, + {"StructField.Name", Field, 0}, + {"StructField.Offset", Field, 0}, + {"StructField.PkgPath", Field, 0}, + {"StructField.Tag", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructOf", Func, 7}, + {"StructTag", Type, 0}, + {"Swapper", Func, 8}, + {"Type", Type, 0}, + {"TypeFor", Func, 22}, + {"TypeOf", Func, 0}, + {"Uint", Const, 0}, + {"Uint16", Const, 0}, + {"Uint32", Const, 0}, + {"Uint64", Const, 0}, + {"Uint8", Const, 0}, + {"Uintptr", Const, 0}, + {"UnsafePointer", Const, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueError.Kind", Field, 0}, + {"ValueError.Method", Field, 0}, + {"ValueOf", Func, 0}, + {"VisibleFields", Func, 17}, + {"Zero", Func, 0}, + }, + "regexp": { + {"(*Regexp).Copy", Method, 6}, + {"(*Regexp).Expand", Method, 0}, + {"(*Regexp).ExpandString", Method, 0}, + {"(*Regexp).Find", Method, 0}, + {"(*Regexp).FindAll", Method, 0}, + {"(*Regexp).FindAllIndex", Method, 0}, + {"(*Regexp).FindAllString", Method, 0}, + {"(*Regexp).FindAllStringIndex", Method, 0}, + {"(*Regexp).FindAllStringSubmatch", Method, 0}, + {"(*Regexp).FindAllStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindAllSubmatch", Method, 0}, + {"(*Regexp).FindAllSubmatchIndex", Method, 0}, + {"(*Regexp).FindIndex", Method, 0}, + {"(*Regexp).FindReaderIndex", Method, 0}, + {"(*Regexp).FindReaderSubmatchIndex", Method, 0}, + {"(*Regexp).FindString", Method, 0}, + {"(*Regexp).FindStringIndex", Method, 0}, + {"(*Regexp).FindStringSubmatch", Method, 0}, + {"(*Regexp).FindStringSubmatchIndex", Method, 0}, + {"(*Regexp).FindSubmatch", Method, 0}, + {"(*Regexp).FindSubmatchIndex", Method, 0}, + {"(*Regexp).LiteralPrefix", Method, 0}, + {"(*Regexp).Longest", Method, 1}, + {"(*Regexp).MarshalText", Method, 21}, + {"(*Regexp).Match", Method, 0}, + {"(*Regexp).MatchReader", Method, 0}, + {"(*Regexp).MatchString", Method, 0}, + {"(*Regexp).NumSubexp", Method, 0}, + {"(*Regexp).ReplaceAll", Method, 0}, + {"(*Regexp).ReplaceAllFunc", Method, 0}, + {"(*Regexp).ReplaceAllLiteral", Method, 0}, + {"(*Regexp).ReplaceAllLiteralString", Method, 0}, + {"(*Regexp).ReplaceAllString", Method, 0}, + {"(*Regexp).ReplaceAllStringFunc", Method, 0}, + {"(*Regexp).Split", Method, 1}, + {"(*Regexp).String", Method, 0}, + {"(*Regexp).SubexpIndex", Method, 15}, + {"(*Regexp).SubexpNames", Method, 0}, + {"(*Regexp).UnmarshalText", 
Method, 21}, + {"Compile", Func, 0}, + {"CompilePOSIX", Func, 0}, + {"Match", Func, 0}, + {"MatchReader", Func, 0}, + {"MatchString", Func, 0}, + {"MustCompile", Func, 0}, + {"MustCompilePOSIX", Func, 0}, + {"QuoteMeta", Func, 0}, + {"Regexp", Type, 0}, + }, + "regexp/syntax": { + {"(*Error).Error", Method, 0}, + {"(*Inst).MatchEmptyWidth", Method, 0}, + {"(*Inst).MatchRune", Method, 0}, + {"(*Inst).MatchRunePos", Method, 3}, + {"(*Inst).String", Method, 0}, + {"(*Prog).Prefix", Method, 0}, + {"(*Prog).StartCond", Method, 0}, + {"(*Prog).String", Method, 0}, + {"(*Regexp).CapNames", Method, 0}, + {"(*Regexp).Equal", Method, 0}, + {"(*Regexp).MaxCap", Method, 0}, + {"(*Regexp).Simplify", Method, 0}, + {"(*Regexp).String", Method, 0}, + {"(ErrorCode).String", Method, 0}, + {"(InstOp).String", Method, 3}, + {"(Op).String", Method, 11}, + {"ClassNL", Const, 0}, + {"Compile", Func, 0}, + {"DotNL", Const, 0}, + {"EmptyBeginLine", Const, 0}, + {"EmptyBeginText", Const, 0}, + {"EmptyEndLine", Const, 0}, + {"EmptyEndText", Const, 0}, + {"EmptyNoWordBoundary", Const, 0}, + {"EmptyOp", Type, 0}, + {"EmptyOpContext", Func, 0}, + {"EmptyWordBoundary", Const, 0}, + {"ErrInternalError", Const, 0}, + {"ErrInvalidCharClass", Const, 0}, + {"ErrInvalidCharRange", Const, 0}, + {"ErrInvalidEscape", Const, 0}, + {"ErrInvalidNamedCapture", Const, 0}, + {"ErrInvalidPerlOp", Const, 0}, + {"ErrInvalidRepeatOp", Const, 0}, + {"ErrInvalidRepeatSize", Const, 0}, + {"ErrInvalidUTF8", Const, 0}, + {"ErrLarge", Const, 20}, + {"ErrMissingBracket", Const, 0}, + {"ErrMissingParen", Const, 0}, + {"ErrMissingRepeatArgument", Const, 0}, + {"ErrNestingDepth", Const, 19}, + {"ErrTrailingBackslash", Const, 0}, + {"ErrUnexpectedParen", Const, 1}, + {"Error", Type, 0}, + {"Error.Code", Field, 0}, + {"Error.Expr", Field, 0}, + {"ErrorCode", Type, 0}, + {"Flags", Type, 0}, + {"FoldCase", Const, 0}, + {"Inst", Type, 0}, + {"Inst.Arg", Field, 0}, + {"Inst.Op", Field, 0}, + {"Inst.Out", Field, 0}, + {"Inst.Rune", Field, 0}, + {"InstAlt", Const, 0}, + {"InstAltMatch", Const, 0}, + {"InstCapture", Const, 0}, + {"InstEmptyWidth", Const, 0}, + {"InstFail", Const, 0}, + {"InstMatch", Const, 0}, + {"InstNop", Const, 0}, + {"InstOp", Type, 0}, + {"InstRune", Const, 0}, + {"InstRune1", Const, 0}, + {"InstRuneAny", Const, 0}, + {"InstRuneAnyNotNL", Const, 0}, + {"IsWordChar", Func, 0}, + {"Literal", Const, 0}, + {"MatchNL", Const, 0}, + {"NonGreedy", Const, 0}, + {"OneLine", Const, 0}, + {"Op", Type, 0}, + {"OpAlternate", Const, 0}, + {"OpAnyChar", Const, 0}, + {"OpAnyCharNotNL", Const, 0}, + {"OpBeginLine", Const, 0}, + {"OpBeginText", Const, 0}, + {"OpCapture", Const, 0}, + {"OpCharClass", Const, 0}, + {"OpConcat", Const, 0}, + {"OpEmptyMatch", Const, 0}, + {"OpEndLine", Const, 0}, + {"OpEndText", Const, 0}, + {"OpLiteral", Const, 0}, + {"OpNoMatch", Const, 0}, + {"OpNoWordBoundary", Const, 0}, + {"OpPlus", Const, 0}, + {"OpQuest", Const, 0}, + {"OpRepeat", Const, 0}, + {"OpStar", Const, 0}, + {"OpWordBoundary", Const, 0}, + {"POSIX", Const, 0}, + {"Parse", Func, 0}, + {"Perl", Const, 0}, + {"PerlX", Const, 0}, + {"Prog", Type, 0}, + {"Prog.Inst", Field, 0}, + {"Prog.NumCap", Field, 0}, + {"Prog.Start", Field, 0}, + {"Regexp", Type, 0}, + {"Regexp.Cap", Field, 0}, + {"Regexp.Flags", Field, 0}, + {"Regexp.Max", Field, 0}, + {"Regexp.Min", Field, 0}, + {"Regexp.Name", Field, 0}, + {"Regexp.Op", Field, 0}, + {"Regexp.Rune", Field, 0}, + {"Regexp.Rune0", Field, 0}, + {"Regexp.Sub", Field, 0}, + {"Regexp.Sub0", Field, 0}, + {"Simple", Const, 0}, + 
{"UnicodeGroups", Const, 0}, + {"WasDollar", Const, 0}, + }, + "runtime": { + {"(*BlockProfileRecord).Stack", Method, 1}, + {"(*Frames).Next", Method, 7}, + {"(*Func).Entry", Method, 0}, + {"(*Func).FileLine", Method, 0}, + {"(*Func).Name", Method, 0}, + {"(*MemProfileRecord).InUseBytes", Method, 0}, + {"(*MemProfileRecord).InUseObjects", Method, 0}, + {"(*MemProfileRecord).Stack", Method, 0}, + {"(*PanicNilError).Error", Method, 21}, + {"(*PanicNilError).RuntimeError", Method, 21}, + {"(*Pinner).Pin", Method, 21}, + {"(*Pinner).Unpin", Method, 21}, + {"(*StackRecord).Stack", Method, 0}, + {"(*TypeAssertionError).Error", Method, 0}, + {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"BlockProfile", Func, 1}, + {"BlockProfileRecord", Type, 1}, + {"BlockProfileRecord.Count", Field, 1}, + {"BlockProfileRecord.Cycles", Field, 1}, + {"BlockProfileRecord.StackRecord", Field, 1}, + {"Breakpoint", Func, 0}, + {"CPUProfile", Func, 0}, + {"Caller", Func, 0}, + {"Callers", Func, 0}, + {"CallersFrames", Func, 7}, + {"Compiler", Const, 0}, + {"Error", Type, 0}, + {"Frame", Type, 7}, + {"Frame.Entry", Field, 7}, + {"Frame.File", Field, 7}, + {"Frame.Func", Field, 7}, + {"Frame.Function", Field, 7}, + {"Frame.Line", Field, 7}, + {"Frame.PC", Field, 7}, + {"Frames", Type, 7}, + {"Func", Type, 0}, + {"FuncForPC", Func, 0}, + {"GC", Func, 0}, + {"GOARCH", Const, 0}, + {"GOMAXPROCS", Func, 0}, + {"GOOS", Const, 0}, + {"GOROOT", Func, 0}, + {"Goexit", Func, 0}, + {"GoroutineProfile", Func, 0}, + {"Gosched", Func, 0}, + {"KeepAlive", Func, 7}, + {"LockOSThread", Func, 0}, + {"MemProfile", Func, 0}, + {"MemProfileRate", Var, 0}, + {"MemProfileRecord", Type, 0}, + {"MemProfileRecord.AllocBytes", Field, 0}, + {"MemProfileRecord.AllocObjects", Field, 0}, + {"MemProfileRecord.FreeBytes", Field, 0}, + {"MemProfileRecord.FreeObjects", Field, 0}, + {"MemProfileRecord.Stack0", Field, 0}, + {"MemStats", Type, 0}, + {"MemStats.Alloc", Field, 0}, + {"MemStats.BuckHashSys", Field, 0}, + {"MemStats.BySize", Field, 0}, + {"MemStats.DebugGC", Field, 0}, + {"MemStats.EnableGC", Field, 0}, + {"MemStats.Frees", Field, 0}, + {"MemStats.GCCPUFraction", Field, 5}, + {"MemStats.GCSys", Field, 2}, + {"MemStats.HeapAlloc", Field, 0}, + {"MemStats.HeapIdle", Field, 0}, + {"MemStats.HeapInuse", Field, 0}, + {"MemStats.HeapObjects", Field, 0}, + {"MemStats.HeapReleased", Field, 0}, + {"MemStats.HeapSys", Field, 0}, + {"MemStats.LastGC", Field, 0}, + {"MemStats.Lookups", Field, 0}, + {"MemStats.MCacheInuse", Field, 0}, + {"MemStats.MCacheSys", Field, 0}, + {"MemStats.MSpanInuse", Field, 0}, + {"MemStats.MSpanSys", Field, 0}, + {"MemStats.Mallocs", Field, 0}, + {"MemStats.NextGC", Field, 0}, + {"MemStats.NumForcedGC", Field, 8}, + {"MemStats.NumGC", Field, 0}, + {"MemStats.OtherSys", Field, 2}, + {"MemStats.PauseEnd", Field, 4}, + {"MemStats.PauseNs", Field, 0}, + {"MemStats.PauseTotalNs", Field, 0}, + {"MemStats.StackInuse", Field, 0}, + {"MemStats.StackSys", Field, 0}, + {"MemStats.Sys", Field, 0}, + {"MemStats.TotalAlloc", Field, 0}, + {"MutexProfile", Func, 8}, + {"NumCPU", Func, 0}, + {"NumCgoCall", Func, 0}, + {"NumGoroutine", Func, 0}, + {"PanicNilError", Type, 21}, + {"Pinner", Type, 21}, + {"ReadMemStats", Func, 0}, + {"ReadTrace", Func, 5}, + {"SetBlockProfileRate", Func, 1}, + {"SetCPUProfileRate", Func, 0}, + {"SetCgoTraceback", Func, 7}, + {"SetFinalizer", Func, 0}, + {"SetMutexProfileFraction", Func, 8}, + {"Stack", Func, 0}, + {"StackRecord", Type, 0}, + {"StackRecord.Stack0", Field, 0}, + {"StartTrace", Func, 5}, + 
{"StopTrace", Func, 5}, + {"ThreadCreateProfile", Func, 0}, + {"TypeAssertionError", Type, 0}, + {"UnlockOSThread", Func, 0}, + {"Version", Func, 0}, + }, + "runtime/cgo": { + {"(Handle).Delete", Method, 17}, + {"(Handle).Value", Method, 17}, + {"Handle", Type, 17}, + {"Incomplete", Type, 20}, + {"NewHandle", Func, 17}, + }, + "runtime/coverage": { + {"ClearCounters", Func, 20}, + {"WriteCounters", Func, 20}, + {"WriteCountersDir", Func, 20}, + {"WriteMeta", Func, 20}, + {"WriteMetaDir", Func, 20}, + }, + "runtime/debug": { + {"(*BuildInfo).String", Method, 18}, + {"BuildInfo", Type, 12}, + {"BuildInfo.Deps", Field, 12}, + {"BuildInfo.GoVersion", Field, 18}, + {"BuildInfo.Main", Field, 12}, + {"BuildInfo.Path", Field, 12}, + {"BuildInfo.Settings", Field, 18}, + {"BuildSetting", Type, 18}, + {"BuildSetting.Key", Field, 18}, + {"BuildSetting.Value", Field, 18}, + {"FreeOSMemory", Func, 1}, + {"GCStats", Type, 1}, + {"GCStats.LastGC", Field, 1}, + {"GCStats.NumGC", Field, 1}, + {"GCStats.Pause", Field, 1}, + {"GCStats.PauseEnd", Field, 4}, + {"GCStats.PauseQuantiles", Field, 1}, + {"GCStats.PauseTotal", Field, 1}, + {"Module", Type, 12}, + {"Module.Path", Field, 12}, + {"Module.Replace", Field, 12}, + {"Module.Sum", Field, 12}, + {"Module.Version", Field, 12}, + {"ParseBuildInfo", Func, 18}, + {"PrintStack", Func, 0}, + {"ReadBuildInfo", Func, 12}, + {"ReadGCStats", Func, 1}, + {"SetGCPercent", Func, 1}, + {"SetMaxStack", Func, 2}, + {"SetMaxThreads", Func, 2}, + {"SetMemoryLimit", Func, 19}, + {"SetPanicOnFault", Func, 3}, + {"SetTraceback", Func, 6}, + {"Stack", Func, 0}, + {"WriteHeapDump", Func, 3}, + }, + "runtime/metrics": { + {"(Value).Float64", Method, 16}, + {"(Value).Float64Histogram", Method, 16}, + {"(Value).Kind", Method, 16}, + {"(Value).Uint64", Method, 16}, + {"All", Func, 16}, + {"Description", Type, 16}, + {"Description.Cumulative", Field, 16}, + {"Description.Description", Field, 16}, + {"Description.Kind", Field, 16}, + {"Description.Name", Field, 16}, + {"Float64Histogram", Type, 16}, + {"Float64Histogram.Buckets", Field, 16}, + {"Float64Histogram.Counts", Field, 16}, + {"KindBad", Const, 16}, + {"KindFloat64", Const, 16}, + {"KindFloat64Histogram", Const, 16}, + {"KindUint64", Const, 16}, + {"Read", Func, 16}, + {"Sample", Type, 16}, + {"Sample.Name", Field, 16}, + {"Sample.Value", Field, 16}, + {"Value", Type, 16}, + {"ValueKind", Type, 16}, + }, + "runtime/pprof": { + {"(*Profile).Add", Method, 0}, + {"(*Profile).Count", Method, 0}, + {"(*Profile).Name", Method, 0}, + {"(*Profile).Remove", Method, 0}, + {"(*Profile).WriteTo", Method, 0}, + {"Do", Func, 9}, + {"ForLabels", Func, 9}, + {"Label", Func, 9}, + {"LabelSet", Type, 9}, + {"Labels", Func, 9}, + {"Lookup", Func, 0}, + {"NewProfile", Func, 0}, + {"Profile", Type, 0}, + {"Profiles", Func, 0}, + {"SetGoroutineLabels", Func, 9}, + {"StartCPUProfile", Func, 0}, + {"StopCPUProfile", Func, 0}, + {"WithLabels", Func, 9}, + {"WriteHeapProfile", Func, 0}, + }, + "runtime/trace": { + {"(*Region).End", Method, 11}, + {"(*Task).End", Method, 11}, + {"IsEnabled", Func, 11}, + {"Log", Func, 11}, + {"Logf", Func, 11}, + {"NewTask", Func, 11}, + {"Region", Type, 11}, + {"Start", Func, 5}, + {"StartRegion", Func, 11}, + {"Stop", Func, 5}, + {"Task", Type, 11}, + {"WithRegion", Func, 11}, + }, + "slices": { + {"BinarySearch", Func, 21}, + {"BinarySearchFunc", Func, 21}, + {"Clip", Func, 21}, + {"Clone", Func, 21}, + {"Compact", Func, 21}, + {"CompactFunc", Func, 21}, + {"Compare", Func, 21}, + {"CompareFunc", Func, 21}, + 
{"Concat", Func, 22}, + {"Contains", Func, 21}, + {"ContainsFunc", Func, 21}, + {"Delete", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + {"Grow", Func, 21}, + {"Index", Func, 21}, + {"IndexFunc", Func, 21}, + {"Insert", Func, 21}, + {"IsSorted", Func, 21}, + {"IsSortedFunc", Func, 21}, + {"Max", Func, 21}, + {"MaxFunc", Func, 21}, + {"Min", Func, 21}, + {"MinFunc", Func, 21}, + {"Replace", Func, 21}, + {"Reverse", Func, 21}, + {"Sort", Func, 21}, + {"SortFunc", Func, 21}, + {"SortStableFunc", Func, 21}, + }, + "sort": { + {"(Float64Slice).Len", Method, 0}, + {"(Float64Slice).Less", Method, 0}, + {"(Float64Slice).Search", Method, 0}, + {"(Float64Slice).Sort", Method, 0}, + {"(Float64Slice).Swap", Method, 0}, + {"(IntSlice).Len", Method, 0}, + {"(IntSlice).Less", Method, 0}, + {"(IntSlice).Search", Method, 0}, + {"(IntSlice).Sort", Method, 0}, + {"(IntSlice).Swap", Method, 0}, + {"(StringSlice).Len", Method, 0}, + {"(StringSlice).Less", Method, 0}, + {"(StringSlice).Search", Method, 0}, + {"(StringSlice).Sort", Method, 0}, + {"(StringSlice).Swap", Method, 0}, + {"Find", Func, 19}, + {"Float64Slice", Type, 0}, + {"Float64s", Func, 0}, + {"Float64sAreSorted", Func, 0}, + {"IntSlice", Type, 0}, + {"Interface", Type, 0}, + {"Ints", Func, 0}, + {"IntsAreSorted", Func, 0}, + {"IsSorted", Func, 0}, + {"Reverse", Func, 1}, + {"Search", Func, 0}, + {"SearchFloat64s", Func, 0}, + {"SearchInts", Func, 0}, + {"SearchStrings", Func, 0}, + {"Slice", Func, 8}, + {"SliceIsSorted", Func, 8}, + {"SliceStable", Func, 8}, + {"Sort", Func, 0}, + {"Stable", Func, 2}, + {"StringSlice", Type, 0}, + {"Strings", Func, 0}, + {"StringsAreSorted", Func, 0}, + }, + "strconv": { + {"(*NumError).Error", Method, 0}, + {"(*NumError).Unwrap", Method, 14}, + {"AppendBool", Func, 0}, + {"AppendFloat", Func, 0}, + {"AppendInt", Func, 0}, + {"AppendQuote", Func, 0}, + {"AppendQuoteRune", Func, 0}, + {"AppendQuoteRuneToASCII", Func, 0}, + {"AppendQuoteRuneToGraphic", Func, 6}, + {"AppendQuoteToASCII", Func, 0}, + {"AppendQuoteToGraphic", Func, 6}, + {"AppendUint", Func, 0}, + {"Atoi", Func, 0}, + {"CanBackquote", Func, 0}, + {"ErrRange", Var, 0}, + {"ErrSyntax", Var, 0}, + {"FormatBool", Func, 0}, + {"FormatComplex", Func, 15}, + {"FormatFloat", Func, 0}, + {"FormatInt", Func, 0}, + {"FormatUint", Func, 0}, + {"IntSize", Const, 0}, + {"IsGraphic", Func, 6}, + {"IsPrint", Func, 0}, + {"Itoa", Func, 0}, + {"NumError", Type, 0}, + {"NumError.Err", Field, 0}, + {"NumError.Func", Field, 0}, + {"NumError.Num", Field, 0}, + {"ParseBool", Func, 0}, + {"ParseComplex", Func, 15}, + {"ParseFloat", Func, 0}, + {"ParseInt", Func, 0}, + {"ParseUint", Func, 0}, + {"Quote", Func, 0}, + {"QuoteRune", Func, 0}, + {"QuoteRuneToASCII", Func, 0}, + {"QuoteRuneToGraphic", Func, 6}, + {"QuoteToASCII", Func, 0}, + {"QuoteToGraphic", Func, 6}, + {"QuotedPrefix", Func, 17}, + {"Unquote", Func, 0}, + {"UnquoteChar", Func, 0}, + }, + "strings": { + {"(*Builder).Cap", Method, 12}, + {"(*Builder).Grow", Method, 10}, + {"(*Builder).Len", Method, 10}, + {"(*Builder).Reset", Method, 10}, + {"(*Builder).String", Method, 10}, + {"(*Builder).Write", Method, 10}, + {"(*Builder).WriteByte", Method, 10}, + {"(*Builder).WriteRune", Method, 10}, + {"(*Builder).WriteString", Method, 10}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + 
{"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Replacer).Replace", Method, 0}, + {"(*Replacer).WriteString", Method, 0}, + {"Builder", Type, 10}, + {"Clone", Func, 18}, + {"Compare", Func, 5}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 0}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 0}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"EqualFold", Func, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 2}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"NewReader", Func, 0}, + {"NewReplacer", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Replacer", Type, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "sync": { + {"(*Cond).Broadcast", Method, 0}, + {"(*Cond).Signal", Method, 0}, + {"(*Cond).Wait", Method, 0}, + {"(*Map).CompareAndDelete", Method, 20}, + {"(*Map).CompareAndSwap", Method, 20}, + {"(*Map).Delete", Method, 9}, + {"(*Map).Load", Method, 9}, + {"(*Map).LoadAndDelete", Method, 15}, + {"(*Map).LoadOrStore", Method, 9}, + {"(*Map).Range", Method, 9}, + {"(*Map).Store", Method, 9}, + {"(*Map).Swap", Method, 20}, + {"(*Mutex).Lock", Method, 0}, + {"(*Mutex).TryLock", Method, 18}, + {"(*Mutex).Unlock", Method, 0}, + {"(*Once).Do", Method, 0}, + {"(*Pool).Get", Method, 3}, + {"(*Pool).Put", Method, 3}, + {"(*RWMutex).Lock", Method, 0}, + {"(*RWMutex).RLock", Method, 0}, + {"(*RWMutex).RLocker", Method, 0}, + {"(*RWMutex).RUnlock", Method, 0}, + {"(*RWMutex).TryLock", Method, 18}, + {"(*RWMutex).TryRLock", Method, 18}, + {"(*RWMutex).Unlock", Method, 0}, + {"(*WaitGroup).Add", Method, 0}, + {"(*WaitGroup).Done", Method, 0}, + {"(*WaitGroup).Wait", Method, 0}, + {"Cond", Type, 0}, + {"Cond.L", Field, 0}, + {"Locker", Type, 0}, + {"Map", Type, 9}, + {"Mutex", Type, 0}, + {"NewCond", Func, 0}, + {"Once", Type, 0}, + {"OnceFunc", Func, 21}, + {"OnceValue", Func, 21}, + {"OnceValues", Func, 21}, + {"Pool", Type, 3}, + {"Pool.New", Field, 3}, + {"RWMutex", Type, 0}, + {"WaitGroup", Type, 0}, + }, + "sync/atomic": { + {"(*Bool).CompareAndSwap", Method, 19}, + {"(*Bool).Load", Method, 19}, + {"(*Bool).Store", Method, 19}, + {"(*Bool).Swap", Method, 19}, + {"(*Int32).Add", Method, 19}, + {"(*Int32).CompareAndSwap", Method, 19}, + {"(*Int32).Load", Method, 19}, + {"(*Int32).Store", Method, 19}, + {"(*Int32).Swap", Method, 19}, + {"(*Int64).Add", Method, 19}, + {"(*Int64).CompareAndSwap", Method, 19}, + {"(*Int64).Load", Method, 19}, + {"(*Int64).Store", Method, 19}, + {"(*Int64).Swap", Method, 19}, + {"(*Pointer).CompareAndSwap", Method, 19}, + {"(*Pointer).Load", Method, 
19}, + {"(*Pointer).Store", Method, 19}, + {"(*Pointer).Swap", Method, 19}, + {"(*Uint32).Add", Method, 19}, + {"(*Uint32).CompareAndSwap", Method, 19}, + {"(*Uint32).Load", Method, 19}, + {"(*Uint32).Store", Method, 19}, + {"(*Uint32).Swap", Method, 19}, + {"(*Uint64).Add", Method, 19}, + {"(*Uint64).CompareAndSwap", Method, 19}, + {"(*Uint64).Load", Method, 19}, + {"(*Uint64).Store", Method, 19}, + {"(*Uint64).Swap", Method, 19}, + {"(*Uintptr).Add", Method, 19}, + {"(*Uintptr).CompareAndSwap", Method, 19}, + {"(*Uintptr).Load", Method, 19}, + {"(*Uintptr).Store", Method, 19}, + {"(*Uintptr).Swap", Method, 19}, + {"(*Value).CompareAndSwap", Method, 17}, + {"(*Value).Load", Method, 4}, + {"(*Value).Store", Method, 4}, + {"(*Value).Swap", Method, 17}, + {"AddInt32", Func, 0}, + {"AddInt64", Func, 0}, + {"AddUint32", Func, 0}, + {"AddUint64", Func, 0}, + {"AddUintptr", Func, 0}, + {"Bool", Type, 19}, + {"CompareAndSwapInt32", Func, 0}, + {"CompareAndSwapInt64", Func, 0}, + {"CompareAndSwapPointer", Func, 0}, + {"CompareAndSwapUint32", Func, 0}, + {"CompareAndSwapUint64", Func, 0}, + {"CompareAndSwapUintptr", Func, 0}, + {"Int32", Type, 19}, + {"Int64", Type, 19}, + {"LoadInt32", Func, 0}, + {"LoadInt64", Func, 0}, + {"LoadPointer", Func, 0}, + {"LoadUint32", Func, 0}, + {"LoadUint64", Func, 0}, + {"LoadUintptr", Func, 0}, + {"Pointer", Type, 19}, + {"StoreInt32", Func, 0}, + {"StoreInt64", Func, 0}, + {"StorePointer", Func, 0}, + {"StoreUint32", Func, 0}, + {"StoreUint64", Func, 0}, + {"StoreUintptr", Func, 0}, + {"SwapInt32", Func, 2}, + {"SwapInt64", Func, 2}, + {"SwapPointer", Func, 2}, + {"SwapUint32", Func, 2}, + {"SwapUint64", Func, 2}, + {"SwapUintptr", Func, 2}, + {"Uint32", Type, 19}, + {"Uint64", Type, 19}, + {"Uintptr", Type, 19}, + {"Value", Type, 4}, + }, + "syscall": { + {"(*Cmsghdr).SetLen", Method, 0}, + {"(*DLL).FindProc", Method, 0}, + {"(*DLL).MustFindProc", Method, 0}, + {"(*DLL).Release", Method, 0}, + {"(*DLLError).Error", Method, 0}, + {"(*DLLError).Unwrap", Method, 16}, + {"(*Filetime).Nanoseconds", Method, 0}, + {"(*Iovec).SetLen", Method, 0}, + {"(*LazyDLL).Handle", Method, 0}, + {"(*LazyDLL).Load", Method, 0}, + {"(*LazyDLL).NewProc", Method, 0}, + {"(*LazyProc).Addr", Method, 0}, + {"(*LazyProc).Call", Method, 0}, + {"(*LazyProc).Find", Method, 0}, + {"(*Msghdr).SetControllen", Method, 0}, + {"(*Proc).Addr", Method, 0}, + {"(*Proc).Call", Method, 0}, + {"(*PtraceRegs).PC", Method, 0}, + {"(*PtraceRegs).SetPC", Method, 0}, + {"(*RawSockaddrAny).Sockaddr", Method, 0}, + {"(*SID).Copy", Method, 0}, + {"(*SID).Len", Method, 0}, + {"(*SID).LookupAccount", Method, 0}, + {"(*SID).String", Method, 0}, + {"(*Timespec).Nano", Method, 0}, + {"(*Timespec).Unix", Method, 0}, + {"(*Timeval).Nano", Method, 0}, + {"(*Timeval).Nanoseconds", Method, 0}, + {"(*Timeval).Unix", Method, 0}, + {"(Errno).Error", Method, 0}, + {"(Errno).Is", Method, 13}, + {"(Errno).Temporary", Method, 0}, + {"(Errno).Timeout", Method, 0}, + {"(Signal).Signal", Method, 0}, + {"(Signal).String", Method, 0}, + {"(Token).Close", Method, 0}, + {"(Token).GetTokenPrimaryGroup", Method, 0}, + {"(Token).GetTokenUser", Method, 0}, + {"(Token).GetUserProfileDirectory", Method, 0}, + {"(WaitStatus).Continued", Method, 0}, + {"(WaitStatus).CoreDump", Method, 0}, + {"(WaitStatus).ExitStatus", Method, 0}, + {"(WaitStatus).Exited", Method, 0}, + {"(WaitStatus).Signal", Method, 0}, + {"(WaitStatus).Signaled", Method, 0}, + {"(WaitStatus).StopSignal", Method, 0}, + {"(WaitStatus).Stopped", Method, 0}, + 
{"(WaitStatus).TrapCause", Method, 0}, + {"AF_ALG", Const, 0}, + {"AF_APPLETALK", Const, 0}, + {"AF_ARP", Const, 0}, + {"AF_ASH", Const, 0}, + {"AF_ATM", Const, 0}, + {"AF_ATMPVC", Const, 0}, + {"AF_ATMSVC", Const, 0}, + {"AF_AX25", Const, 0}, + {"AF_BLUETOOTH", Const, 0}, + {"AF_BRIDGE", Const, 0}, + {"AF_CAIF", Const, 0}, + {"AF_CAN", Const, 0}, + {"AF_CCITT", Const, 0}, + {"AF_CHAOS", Const, 0}, + {"AF_CNT", Const, 0}, + {"AF_COIP", Const, 0}, + {"AF_DATAKIT", Const, 0}, + {"AF_DECnet", Const, 0}, + {"AF_DLI", Const, 0}, + {"AF_E164", Const, 0}, + {"AF_ECMA", Const, 0}, + {"AF_ECONET", Const, 0}, + {"AF_ENCAP", Const, 1}, + {"AF_FILE", Const, 0}, + {"AF_HYLINK", Const, 0}, + {"AF_IEEE80211", Const, 0}, + {"AF_IEEE802154", Const, 0}, + {"AF_IMPLINK", Const, 0}, + {"AF_INET", Const, 0}, + {"AF_INET6", Const, 0}, + {"AF_INET6_SDP", Const, 3}, + {"AF_INET_SDP", Const, 3}, + {"AF_IPX", Const, 0}, + {"AF_IRDA", Const, 0}, + {"AF_ISDN", Const, 0}, + {"AF_ISO", Const, 0}, + {"AF_IUCV", Const, 0}, + {"AF_KEY", Const, 0}, + {"AF_LAT", Const, 0}, + {"AF_LINK", Const, 0}, + {"AF_LLC", Const, 0}, + {"AF_LOCAL", Const, 0}, + {"AF_MAX", Const, 0}, + {"AF_MPLS", Const, 1}, + {"AF_NATM", Const, 0}, + {"AF_NDRV", Const, 0}, + {"AF_NETBEUI", Const, 0}, + {"AF_NETBIOS", Const, 0}, + {"AF_NETGRAPH", Const, 0}, + {"AF_NETLINK", Const, 0}, + {"AF_NETROM", Const, 0}, + {"AF_NS", Const, 0}, + {"AF_OROUTE", Const, 1}, + {"AF_OSI", Const, 0}, + {"AF_PACKET", Const, 0}, + {"AF_PHONET", Const, 0}, + {"AF_PPP", Const, 0}, + {"AF_PPPOX", Const, 0}, + {"AF_PUP", Const, 0}, + {"AF_RDS", Const, 0}, + {"AF_RESERVED_36", Const, 0}, + {"AF_ROSE", Const, 0}, + {"AF_ROUTE", Const, 0}, + {"AF_RXRPC", Const, 0}, + {"AF_SCLUSTER", Const, 0}, + {"AF_SECURITY", Const, 0}, + {"AF_SIP", Const, 0}, + {"AF_SLOW", Const, 0}, + {"AF_SNA", Const, 0}, + {"AF_SYSTEM", Const, 0}, + {"AF_TIPC", Const, 0}, + {"AF_UNIX", Const, 0}, + {"AF_UNSPEC", Const, 0}, + {"AF_UTUN", Const, 16}, + {"AF_VENDOR00", Const, 0}, + {"AF_VENDOR01", Const, 0}, + {"AF_VENDOR02", Const, 0}, + {"AF_VENDOR03", Const, 0}, + {"AF_VENDOR04", Const, 0}, + {"AF_VENDOR05", Const, 0}, + {"AF_VENDOR06", Const, 0}, + {"AF_VENDOR07", Const, 0}, + {"AF_VENDOR08", Const, 0}, + {"AF_VENDOR09", Const, 0}, + {"AF_VENDOR10", Const, 0}, + {"AF_VENDOR11", Const, 0}, + {"AF_VENDOR12", Const, 0}, + {"AF_VENDOR13", Const, 0}, + {"AF_VENDOR14", Const, 0}, + {"AF_VENDOR15", Const, 0}, + {"AF_VENDOR16", Const, 0}, + {"AF_VENDOR17", Const, 0}, + {"AF_VENDOR18", Const, 0}, + {"AF_VENDOR19", Const, 0}, + {"AF_VENDOR20", Const, 0}, + {"AF_VENDOR21", Const, 0}, + {"AF_VENDOR22", Const, 0}, + {"AF_VENDOR23", Const, 0}, + {"AF_VENDOR24", Const, 0}, + {"AF_VENDOR25", Const, 0}, + {"AF_VENDOR26", Const, 0}, + {"AF_VENDOR27", Const, 0}, + {"AF_VENDOR28", Const, 0}, + {"AF_VENDOR29", Const, 0}, + {"AF_VENDOR30", Const, 0}, + {"AF_VENDOR31", Const, 0}, + {"AF_VENDOR32", Const, 0}, + {"AF_VENDOR33", Const, 0}, + {"AF_VENDOR34", Const, 0}, + {"AF_VENDOR35", Const, 0}, + {"AF_VENDOR36", Const, 0}, + {"AF_VENDOR37", Const, 0}, + {"AF_VENDOR38", Const, 0}, + {"AF_VENDOR39", Const, 0}, + {"AF_VENDOR40", Const, 0}, + {"AF_VENDOR41", Const, 0}, + {"AF_VENDOR42", Const, 0}, + {"AF_VENDOR43", Const, 0}, + {"AF_VENDOR44", Const, 0}, + {"AF_VENDOR45", Const, 0}, + {"AF_VENDOR46", Const, 0}, + {"AF_VENDOR47", Const, 0}, + {"AF_WANPIPE", Const, 0}, + {"AF_X25", Const, 0}, + {"AI_CANONNAME", Const, 1}, + {"AI_NUMERICHOST", Const, 1}, + {"AI_PASSIVE", Const, 1}, + {"APPLICATION_ERROR", Const, 0}, + {"ARPHRD_ADAPT", 
Const, 0}, + {"ARPHRD_APPLETLK", Const, 0}, + {"ARPHRD_ARCNET", Const, 0}, + {"ARPHRD_ASH", Const, 0}, + {"ARPHRD_ATM", Const, 0}, + {"ARPHRD_AX25", Const, 0}, + {"ARPHRD_BIF", Const, 0}, + {"ARPHRD_CHAOS", Const, 0}, + {"ARPHRD_CISCO", Const, 0}, + {"ARPHRD_CSLIP", Const, 0}, + {"ARPHRD_CSLIP6", Const, 0}, + {"ARPHRD_DDCMP", Const, 0}, + {"ARPHRD_DLCI", Const, 0}, + {"ARPHRD_ECONET", Const, 0}, + {"ARPHRD_EETHER", Const, 0}, + {"ARPHRD_ETHER", Const, 0}, + {"ARPHRD_EUI64", Const, 0}, + {"ARPHRD_FCAL", Const, 0}, + {"ARPHRD_FCFABRIC", Const, 0}, + {"ARPHRD_FCPL", Const, 0}, + {"ARPHRD_FCPP", Const, 0}, + {"ARPHRD_FDDI", Const, 0}, + {"ARPHRD_FRAD", Const, 0}, + {"ARPHRD_FRELAY", Const, 1}, + {"ARPHRD_HDLC", Const, 0}, + {"ARPHRD_HIPPI", Const, 0}, + {"ARPHRD_HWX25", Const, 0}, + {"ARPHRD_IEEE1394", Const, 0}, + {"ARPHRD_IEEE802", Const, 0}, + {"ARPHRD_IEEE80211", Const, 0}, + {"ARPHRD_IEEE80211_PRISM", Const, 0}, + {"ARPHRD_IEEE80211_RADIOTAP", Const, 0}, + {"ARPHRD_IEEE802154", Const, 0}, + {"ARPHRD_IEEE802154_PHY", Const, 0}, + {"ARPHRD_IEEE802_TR", Const, 0}, + {"ARPHRD_INFINIBAND", Const, 0}, + {"ARPHRD_IPDDP", Const, 0}, + {"ARPHRD_IPGRE", Const, 0}, + {"ARPHRD_IRDA", Const, 0}, + {"ARPHRD_LAPB", Const, 0}, + {"ARPHRD_LOCALTLK", Const, 0}, + {"ARPHRD_LOOPBACK", Const, 0}, + {"ARPHRD_METRICOM", Const, 0}, + {"ARPHRD_NETROM", Const, 0}, + {"ARPHRD_NONE", Const, 0}, + {"ARPHRD_PIMREG", Const, 0}, + {"ARPHRD_PPP", Const, 0}, + {"ARPHRD_PRONET", Const, 0}, + {"ARPHRD_RAWHDLC", Const, 0}, + {"ARPHRD_ROSE", Const, 0}, + {"ARPHRD_RSRVD", Const, 0}, + {"ARPHRD_SIT", Const, 0}, + {"ARPHRD_SKIP", Const, 0}, + {"ARPHRD_SLIP", Const, 0}, + {"ARPHRD_SLIP6", Const, 0}, + {"ARPHRD_STRIP", Const, 1}, + {"ARPHRD_TUNNEL", Const, 0}, + {"ARPHRD_TUNNEL6", Const, 0}, + {"ARPHRD_VOID", Const, 0}, + {"ARPHRD_X25", Const, 0}, + {"AUTHTYPE_CLIENT", Const, 0}, + {"AUTHTYPE_SERVER", Const, 0}, + {"Accept", Func, 0}, + {"Accept4", Func, 1}, + {"AcceptEx", Func, 0}, + {"Access", Func, 0}, + {"Acct", Func, 0}, + {"AddrinfoW", Type, 1}, + {"AddrinfoW.Addr", Field, 1}, + {"AddrinfoW.Addrlen", Field, 1}, + {"AddrinfoW.Canonname", Field, 1}, + {"AddrinfoW.Family", Field, 1}, + {"AddrinfoW.Flags", Field, 1}, + {"AddrinfoW.Next", Field, 1}, + {"AddrinfoW.Protocol", Field, 1}, + {"AddrinfoW.Socktype", Field, 1}, + {"Adjtime", Func, 0}, + {"Adjtimex", Func, 0}, + {"AllThreadsSyscall", Func, 16}, + {"AllThreadsSyscall6", Func, 16}, + {"AttachLsf", Func, 0}, + {"B0", Const, 0}, + {"B1000000", Const, 0}, + {"B110", Const, 0}, + {"B115200", Const, 0}, + {"B1152000", Const, 0}, + {"B1200", Const, 0}, + {"B134", Const, 0}, + {"B14400", Const, 1}, + {"B150", Const, 0}, + {"B1500000", Const, 0}, + {"B1800", Const, 0}, + {"B19200", Const, 0}, + {"B200", Const, 0}, + {"B2000000", Const, 0}, + {"B230400", Const, 0}, + {"B2400", Const, 0}, + {"B2500000", Const, 0}, + {"B28800", Const, 1}, + {"B300", Const, 0}, + {"B3000000", Const, 0}, + {"B3500000", Const, 0}, + {"B38400", Const, 0}, + {"B4000000", Const, 0}, + {"B460800", Const, 0}, + {"B4800", Const, 0}, + {"B50", Const, 0}, + {"B500000", Const, 0}, + {"B57600", Const, 0}, + {"B576000", Const, 0}, + {"B600", Const, 0}, + {"B7200", Const, 1}, + {"B75", Const, 0}, + {"B76800", Const, 1}, + {"B921600", Const, 0}, + {"B9600", Const, 0}, + {"BASE_PROTOCOL", Const, 2}, + {"BIOCFEEDBACK", Const, 0}, + {"BIOCFLUSH", Const, 0}, + {"BIOCGBLEN", Const, 0}, + {"BIOCGDIRECTION", Const, 0}, + {"BIOCGDIRFILT", Const, 1}, + {"BIOCGDLT", Const, 0}, + {"BIOCGDLTLIST", Const, 0}, + {"BIOCGETBUFMODE", 
Const, 0}, + {"BIOCGETIF", Const, 0}, + {"BIOCGETZMAX", Const, 0}, + {"BIOCGFEEDBACK", Const, 1}, + {"BIOCGFILDROP", Const, 1}, + {"BIOCGHDRCMPLT", Const, 0}, + {"BIOCGRSIG", Const, 0}, + {"BIOCGRTIMEOUT", Const, 0}, + {"BIOCGSEESENT", Const, 0}, + {"BIOCGSTATS", Const, 0}, + {"BIOCGSTATSOLD", Const, 1}, + {"BIOCGTSTAMP", Const, 1}, + {"BIOCIMMEDIATE", Const, 0}, + {"BIOCLOCK", Const, 0}, + {"BIOCPROMISC", Const, 0}, + {"BIOCROTZBUF", Const, 0}, + {"BIOCSBLEN", Const, 0}, + {"BIOCSDIRECTION", Const, 0}, + {"BIOCSDIRFILT", Const, 1}, + {"BIOCSDLT", Const, 0}, + {"BIOCSETBUFMODE", Const, 0}, + {"BIOCSETF", Const, 0}, + {"BIOCSETFNR", Const, 0}, + {"BIOCSETIF", Const, 0}, + {"BIOCSETWF", Const, 0}, + {"BIOCSETZBUF", Const, 0}, + {"BIOCSFEEDBACK", Const, 1}, + {"BIOCSFILDROP", Const, 1}, + {"BIOCSHDRCMPLT", Const, 0}, + {"BIOCSRSIG", Const, 0}, + {"BIOCSRTIMEOUT", Const, 0}, + {"BIOCSSEESENT", Const, 0}, + {"BIOCSTCPF", Const, 1}, + {"BIOCSTSTAMP", Const, 1}, + {"BIOCSUDPF", Const, 1}, + {"BIOCVERSION", Const, 0}, + {"BPF_A", Const, 0}, + {"BPF_ABS", Const, 0}, + {"BPF_ADD", Const, 0}, + {"BPF_ALIGNMENT", Const, 0}, + {"BPF_ALIGNMENT32", Const, 1}, + {"BPF_ALU", Const, 0}, + {"BPF_AND", Const, 0}, + {"BPF_B", Const, 0}, + {"BPF_BUFMODE_BUFFER", Const, 0}, + {"BPF_BUFMODE_ZBUF", Const, 0}, + {"BPF_DFLTBUFSIZE", Const, 1}, + {"BPF_DIRECTION_IN", Const, 1}, + {"BPF_DIRECTION_OUT", Const, 1}, + {"BPF_DIV", Const, 0}, + {"BPF_H", Const, 0}, + {"BPF_IMM", Const, 0}, + {"BPF_IND", Const, 0}, + {"BPF_JA", Const, 0}, + {"BPF_JEQ", Const, 0}, + {"BPF_JGE", Const, 0}, + {"BPF_JGT", Const, 0}, + {"BPF_JMP", Const, 0}, + {"BPF_JSET", Const, 0}, + {"BPF_K", Const, 0}, + {"BPF_LD", Const, 0}, + {"BPF_LDX", Const, 0}, + {"BPF_LEN", Const, 0}, + {"BPF_LSH", Const, 0}, + {"BPF_MAJOR_VERSION", Const, 0}, + {"BPF_MAXBUFSIZE", Const, 0}, + {"BPF_MAXINSNS", Const, 0}, + {"BPF_MEM", Const, 0}, + {"BPF_MEMWORDS", Const, 0}, + {"BPF_MINBUFSIZE", Const, 0}, + {"BPF_MINOR_VERSION", Const, 0}, + {"BPF_MISC", Const, 0}, + {"BPF_MSH", Const, 0}, + {"BPF_MUL", Const, 0}, + {"BPF_NEG", Const, 0}, + {"BPF_OR", Const, 0}, + {"BPF_RELEASE", Const, 0}, + {"BPF_RET", Const, 0}, + {"BPF_RSH", Const, 0}, + {"BPF_ST", Const, 0}, + {"BPF_STX", Const, 0}, + {"BPF_SUB", Const, 0}, + {"BPF_TAX", Const, 0}, + {"BPF_TXA", Const, 0}, + {"BPF_T_BINTIME", Const, 1}, + {"BPF_T_BINTIME_FAST", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_FAST", Const, 1}, + {"BPF_T_FLAG_MASK", Const, 1}, + {"BPF_T_FORMAT_MASK", Const, 1}, + {"BPF_T_MICROTIME", Const, 1}, + {"BPF_T_MICROTIME_FAST", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_MONOTONIC", Const, 1}, + {"BPF_T_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NANOTIME", Const, 1}, + {"BPF_T_NANOTIME_FAST", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NONE", Const, 1}, + {"BPF_T_NORMAL", Const, 1}, + {"BPF_W", Const, 0}, + {"BPF_X", Const, 0}, + {"BRKINT", Const, 0}, + {"Bind", Func, 0}, + {"BindToDevice", Func, 0}, + {"BpfBuflen", Func, 0}, + {"BpfDatalink", Func, 0}, + {"BpfHdr", Type, 0}, + {"BpfHdr.Caplen", Field, 0}, + {"BpfHdr.Datalen", Field, 0}, + {"BpfHdr.Hdrlen", Field, 0}, + {"BpfHdr.Pad_cgo_0", Field, 0}, + {"BpfHdr.Tstamp", Field, 0}, + {"BpfHeadercmpl", Func, 0}, + {"BpfInsn", Type, 0}, + {"BpfInsn.Code", Field, 0}, + {"BpfInsn.Jf", Field, 0}, + {"BpfInsn.Jt", Field, 0}, + {"BpfInsn.K", Field, 0}, + 
{"BpfInterface", Func, 0}, + {"BpfJump", Func, 0}, + {"BpfProgram", Type, 0}, + {"BpfProgram.Insns", Field, 0}, + {"BpfProgram.Len", Field, 0}, + {"BpfProgram.Pad_cgo_0", Field, 0}, + {"BpfStat", Type, 0}, + {"BpfStat.Capt", Field, 2}, + {"BpfStat.Drop", Field, 0}, + {"BpfStat.Padding", Field, 2}, + {"BpfStat.Recv", Field, 0}, + {"BpfStats", Func, 0}, + {"BpfStmt", Func, 0}, + {"BpfTimeout", Func, 0}, + {"BpfTimeval", Type, 2}, + {"BpfTimeval.Sec", Field, 2}, + {"BpfTimeval.Usec", Field, 2}, + {"BpfVersion", Type, 0}, + {"BpfVersion.Major", Field, 0}, + {"BpfVersion.Minor", Field, 0}, + {"BpfZbuf", Type, 0}, + {"BpfZbuf.Bufa", Field, 0}, + {"BpfZbuf.Bufb", Field, 0}, + {"BpfZbuf.Buflen", Field, 0}, + {"BpfZbufHeader", Type, 0}, + {"BpfZbufHeader.Kernel_gen", Field, 0}, + {"BpfZbufHeader.Kernel_len", Field, 0}, + {"BpfZbufHeader.User_gen", Field, 0}, + {"BpfZbufHeader.X_bzh_pad", Field, 0}, + {"ByHandleFileInformation", Type, 0}, + {"ByHandleFileInformation.CreationTime", Field, 0}, + {"ByHandleFileInformation.FileAttributes", Field, 0}, + {"ByHandleFileInformation.FileIndexHigh", Field, 0}, + {"ByHandleFileInformation.FileIndexLow", Field, 0}, + {"ByHandleFileInformation.FileSizeHigh", Field, 0}, + {"ByHandleFileInformation.FileSizeLow", Field, 0}, + {"ByHandleFileInformation.LastAccessTime", Field, 0}, + {"ByHandleFileInformation.LastWriteTime", Field, 0}, + {"ByHandleFileInformation.NumberOfLinks", Field, 0}, + {"ByHandleFileInformation.VolumeSerialNumber", Field, 0}, + {"BytePtrFromString", Func, 1}, + {"ByteSliceFromString", Func, 1}, + {"CCR0_FLUSH", Const, 1}, + {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0}, + {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0}, + {"CERT_CHAIN_POLICY_BASE", Const, 0}, + {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_CHAIN_POLICY_EV", Const, 0}, + {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0}, + {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0}, + {"CERT_CHAIN_POLICY_SSL", Const, 0}, + {"CERT_E_CN_NO_MATCH", Const, 0}, + {"CERT_E_EXPIRED", Const, 0}, + {"CERT_E_PURPOSE", Const, 0}, + {"CERT_E_ROLE", Const, 0}, + {"CERT_E_UNTRUSTEDROOT", Const, 0}, + {"CERT_STORE_ADD_ALWAYS", Const, 0}, + {"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0}, + {"CERT_STORE_PROV_MEMORY", Const, 0}, + {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_EXTENSION", Const, 0}, + {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_IS_CYCLIC", Const, 0}, + {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0}, + {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0}, + {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0}, + {"CERT_TRUST_IS_REVOKED", Const, 0}, + {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0}, + {"CERT_TRUST_NO_ERROR", Const, 0}, + {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0}, + {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0}, + {"CFLUSH", Const, 1}, + {"CLOCAL", Const, 0}, + {"CLONE_CHILD_CLEARTID", Const, 2}, + {"CLONE_CHILD_SETTID", Const, 2}, + {"CLONE_CLEAR_SIGHAND", Const, 20}, + {"CLONE_CSIGNAL", Const, 3}, + {"CLONE_DETACHED", Const, 2}, + {"CLONE_FILES", Const, 2}, + {"CLONE_FS", 
Const, 2}, + {"CLONE_INTO_CGROUP", Const, 20}, + {"CLONE_IO", Const, 2}, + {"CLONE_NEWCGROUP", Const, 20}, + {"CLONE_NEWIPC", Const, 2}, + {"CLONE_NEWNET", Const, 2}, + {"CLONE_NEWNS", Const, 2}, + {"CLONE_NEWPID", Const, 2}, + {"CLONE_NEWTIME", Const, 20}, + {"CLONE_NEWUSER", Const, 2}, + {"CLONE_NEWUTS", Const, 2}, + {"CLONE_PARENT", Const, 2}, + {"CLONE_PARENT_SETTID", Const, 2}, + {"CLONE_PID", Const, 3}, + {"CLONE_PIDFD", Const, 20}, + {"CLONE_PTRACE", Const, 2}, + {"CLONE_SETTLS", Const, 2}, + {"CLONE_SIGHAND", Const, 2}, + {"CLONE_SYSVSEM", Const, 2}, + {"CLONE_THREAD", Const, 2}, + {"CLONE_UNTRACED", Const, 2}, + {"CLONE_VFORK", Const, 2}, + {"CLONE_VM", Const, 2}, + {"CPUID_CFLUSH", Const, 1}, + {"CREAD", Const, 0}, + {"CREATE_ALWAYS", Const, 0}, + {"CREATE_NEW", Const, 0}, + {"CREATE_NEW_PROCESS_GROUP", Const, 1}, + {"CREATE_UNICODE_ENVIRONMENT", Const, 0}, + {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0}, + {"CRYPT_DELETEKEYSET", Const, 0}, + {"CRYPT_MACHINE_KEYSET", Const, 0}, + {"CRYPT_NEWKEYSET", Const, 0}, + {"CRYPT_SILENT", Const, 0}, + {"CRYPT_VERIFYCONTEXT", Const, 0}, + {"CS5", Const, 0}, + {"CS6", Const, 0}, + {"CS7", Const, 0}, + {"CS8", Const, 0}, + {"CSIZE", Const, 0}, + {"CSTART", Const, 1}, + {"CSTATUS", Const, 1}, + {"CSTOP", Const, 1}, + {"CSTOPB", Const, 0}, + {"CSUSP", Const, 1}, + {"CTL_MAXNAME", Const, 0}, + {"CTL_NET", Const, 0}, + {"CTL_QUERY", Const, 1}, + {"CTRL_BREAK_EVENT", Const, 1}, + {"CTRL_CLOSE_EVENT", Const, 14}, + {"CTRL_C_EVENT", Const, 1}, + {"CTRL_LOGOFF_EVENT", Const, 14}, + {"CTRL_SHUTDOWN_EVENT", Const, 14}, + {"CancelIo", Func, 0}, + {"CancelIoEx", Func, 1}, + {"CertAddCertificateContextToStore", Func, 0}, + {"CertChainContext", Type, 0}, + {"CertChainContext.ChainCount", Field, 0}, + {"CertChainContext.Chains", Field, 0}, + {"CertChainContext.HasRevocationFreshnessTime", Field, 0}, + {"CertChainContext.LowerQualityChainCount", Field, 0}, + {"CertChainContext.LowerQualityChains", Field, 0}, + {"CertChainContext.RevocationFreshnessTime", Field, 0}, + {"CertChainContext.Size", Field, 0}, + {"CertChainContext.TrustStatus", Field, 0}, + {"CertChainElement", Type, 0}, + {"CertChainElement.ApplicationUsage", Field, 0}, + {"CertChainElement.CertContext", Field, 0}, + {"CertChainElement.ExtendedErrorInfo", Field, 0}, + {"CertChainElement.IssuanceUsage", Field, 0}, + {"CertChainElement.RevocationInfo", Field, 0}, + {"CertChainElement.Size", Field, 0}, + {"CertChainElement.TrustStatus", Field, 0}, + {"CertChainPara", Type, 0}, + {"CertChainPara.CacheResync", Field, 0}, + {"CertChainPara.CheckRevocationFreshnessTime", Field, 0}, + {"CertChainPara.RequestedUsage", Field, 0}, + {"CertChainPara.RequstedIssuancePolicy", Field, 0}, + {"CertChainPara.RevocationFreshnessTime", Field, 0}, + {"CertChainPara.Size", Field, 0}, + {"CertChainPara.URLRetrievalTimeout", Field, 0}, + {"CertChainPolicyPara", Type, 0}, + {"CertChainPolicyPara.ExtraPolicyPara", Field, 0}, + {"CertChainPolicyPara.Flags", Field, 0}, + {"CertChainPolicyPara.Size", Field, 0}, + {"CertChainPolicyStatus", Type, 0}, + {"CertChainPolicyStatus.ChainIndex", Field, 0}, + {"CertChainPolicyStatus.ElementIndex", Field, 0}, + {"CertChainPolicyStatus.Error", Field, 0}, + {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0}, + {"CertChainPolicyStatus.Size", Field, 0}, + {"CertCloseStore", Func, 0}, + {"CertContext", Type, 0}, + {"CertContext.CertInfo", Field, 0}, + {"CertContext.EncodedCert", Field, 0}, + {"CertContext.EncodingType", Field, 0}, + {"CertContext.Length", Field, 0}, + 
{"CertContext.Store", Field, 0}, + {"CertCreateCertificateContext", Func, 0}, + {"CertEnhKeyUsage", Type, 0}, + {"CertEnhKeyUsage.Length", Field, 0}, + {"CertEnhKeyUsage.UsageIdentifiers", Field, 0}, + {"CertEnumCertificatesInStore", Func, 0}, + {"CertFreeCertificateChain", Func, 0}, + {"CertFreeCertificateContext", Func, 0}, + {"CertGetCertificateChain", Func, 0}, + {"CertInfo", Type, 11}, + {"CertOpenStore", Func, 0}, + {"CertOpenSystemStore", Func, 0}, + {"CertRevocationCrlInfo", Type, 11}, + {"CertRevocationInfo", Type, 0}, + {"CertRevocationInfo.CrlInfo", Field, 0}, + {"CertRevocationInfo.FreshnessTime", Field, 0}, + {"CertRevocationInfo.HasFreshnessTime", Field, 0}, + {"CertRevocationInfo.OidSpecificInfo", Field, 0}, + {"CertRevocationInfo.RevocationOid", Field, 0}, + {"CertRevocationInfo.RevocationResult", Field, 0}, + {"CertRevocationInfo.Size", Field, 0}, + {"CertSimpleChain", Type, 0}, + {"CertSimpleChain.Elements", Field, 0}, + {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.NumElements", Field, 0}, + {"CertSimpleChain.RevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.Size", Field, 0}, + {"CertSimpleChain.TrustListInfo", Field, 0}, + {"CertSimpleChain.TrustStatus", Field, 0}, + {"CertTrustListInfo", Type, 11}, + {"CertTrustStatus", Type, 0}, + {"CertTrustStatus.ErrorStatus", Field, 0}, + {"CertTrustStatus.InfoStatus", Field, 0}, + {"CertUsageMatch", Type, 0}, + {"CertUsageMatch.Type", Field, 0}, + {"CertUsageMatch.Usage", Field, 0}, + {"CertVerifyCertificateChainPolicy", Func, 0}, + {"Chdir", Func, 0}, + {"CheckBpfVersion", Func, 0}, + {"Chflags", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chroot", Func, 0}, + {"Clearenv", Func, 0}, + {"Close", Func, 0}, + {"CloseHandle", Func, 0}, + {"CloseOnExec", Func, 0}, + {"Closesocket", Func, 0}, + {"CmsgLen", Func, 0}, + {"CmsgSpace", Func, 0}, + {"Cmsghdr", Type, 0}, + {"Cmsghdr.Len", Field, 0}, + {"Cmsghdr.Level", Field, 0}, + {"Cmsghdr.Type", Field, 0}, + {"Cmsghdr.X__cmsg_data", Field, 0}, + {"CommandLineToArgv", Func, 0}, + {"ComputerName", Func, 0}, + {"Conn", Type, 9}, + {"Connect", Func, 0}, + {"ConnectEx", Func, 1}, + {"ConvertSidToStringSid", Func, 0}, + {"ConvertStringSidToSid", Func, 0}, + {"CopySid", Func, 0}, + {"Creat", Func, 0}, + {"CreateDirectory", Func, 0}, + {"CreateFile", Func, 0}, + {"CreateFileMapping", Func, 0}, + {"CreateHardLink", Func, 4}, + {"CreateIoCompletionPort", Func, 0}, + {"CreatePipe", Func, 0}, + {"CreateProcess", Func, 0}, + {"CreateProcessAsUser", Func, 10}, + {"CreateSymbolicLink", Func, 4}, + {"CreateToolhelp32Snapshot", Func, 4}, + {"Credential", Type, 0}, + {"Credential.Gid", Field, 0}, + {"Credential.Groups", Field, 0}, + {"Credential.NoSetGroups", Field, 9}, + {"Credential.Uid", Field, 0}, + {"CryptAcquireContext", Func, 0}, + {"CryptGenRandom", Func, 0}, + {"CryptReleaseContext", Func, 0}, + {"DIOCBSFLUSH", Const, 1}, + {"DIOCOSFPFLUSH", Const, 1}, + {"DLL", Type, 0}, + {"DLL.Handle", Field, 0}, + {"DLL.Name", Field, 0}, + {"DLLError", Type, 0}, + {"DLLError.Err", Field, 0}, + {"DLLError.Msg", Field, 0}, + {"DLLError.ObjName", Field, 0}, + {"DLT_A429", Const, 0}, + {"DLT_A653_ICM", Const, 0}, + {"DLT_AIRONET_HEADER", Const, 0}, + {"DLT_AOS", Const, 1}, + {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0}, + {"DLT_ARCNET", Const, 0}, + {"DLT_ARCNET_LINUX", Const, 0}, + {"DLT_ATM_CLIP", Const, 0}, + {"DLT_ATM_RFC1483", Const, 0}, + {"DLT_AURORA", Const, 0}, + {"DLT_AX25", Const, 0}, + {"DLT_AX25_KISS", Const, 0}, + {"DLT_BACNET_MS_TP", Const, 0}, + 
{"DLT_BLUETOOTH_HCI_H4", Const, 0}, + {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0}, + {"DLT_CAN20B", Const, 0}, + {"DLT_CAN_SOCKETCAN", Const, 1}, + {"DLT_CHAOS", Const, 0}, + {"DLT_CHDLC", Const, 0}, + {"DLT_CISCO_IOS", Const, 0}, + {"DLT_C_HDLC", Const, 0}, + {"DLT_C_HDLC_WITH_DIR", Const, 0}, + {"DLT_DBUS", Const, 1}, + {"DLT_DECT", Const, 1}, + {"DLT_DOCSIS", Const, 0}, + {"DLT_DVB_CI", Const, 1}, + {"DLT_ECONET", Const, 0}, + {"DLT_EN10MB", Const, 0}, + {"DLT_EN3MB", Const, 0}, + {"DLT_ENC", Const, 0}, + {"DLT_ERF", Const, 0}, + {"DLT_ERF_ETH", Const, 0}, + {"DLT_ERF_POS", Const, 0}, + {"DLT_FC_2", Const, 1}, + {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1}, + {"DLT_FDDI", Const, 0}, + {"DLT_FLEXRAY", Const, 0}, + {"DLT_FRELAY", Const, 0}, + {"DLT_FRELAY_WITH_DIR", Const, 0}, + {"DLT_GCOM_SERIAL", Const, 0}, + {"DLT_GCOM_T1E1", Const, 0}, + {"DLT_GPF_F", Const, 0}, + {"DLT_GPF_T", Const, 0}, + {"DLT_GPRS_LLC", Const, 0}, + {"DLT_GSMTAP_ABIS", Const, 1}, + {"DLT_GSMTAP_UM", Const, 1}, + {"DLT_HDLC", Const, 1}, + {"DLT_HHDLC", Const, 0}, + {"DLT_HIPPI", Const, 1}, + {"DLT_IBM_SN", Const, 0}, + {"DLT_IBM_SP", Const, 0}, + {"DLT_IEEE802", Const, 0}, + {"DLT_IEEE802_11", Const, 0}, + {"DLT_IEEE802_11_RADIO", Const, 0}, + {"DLT_IEEE802_11_RADIO_AVS", Const, 0}, + {"DLT_IEEE802_15_4", Const, 0}, + {"DLT_IEEE802_15_4_LINUX", Const, 0}, + {"DLT_IEEE802_15_4_NOFCS", Const, 1}, + {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0}, + {"DLT_IPFILTER", Const, 0}, + {"DLT_IPMB", Const, 0}, + {"DLT_IPMB_LINUX", Const, 0}, + {"DLT_IPNET", Const, 1}, + {"DLT_IPOIB", Const, 1}, + {"DLT_IPV4", Const, 1}, + {"DLT_IPV6", Const, 1}, + {"DLT_IP_OVER_FC", Const, 0}, + {"DLT_JUNIPER_ATM1", Const, 0}, + {"DLT_JUNIPER_ATM2", Const, 0}, + {"DLT_JUNIPER_ATM_CEMIC", Const, 1}, + {"DLT_JUNIPER_CHDLC", Const, 0}, + {"DLT_JUNIPER_ES", Const, 0}, + {"DLT_JUNIPER_ETHER", Const, 0}, + {"DLT_JUNIPER_FIBRECHANNEL", Const, 1}, + {"DLT_JUNIPER_FRELAY", Const, 0}, + {"DLT_JUNIPER_GGSN", Const, 0}, + {"DLT_JUNIPER_ISM", Const, 0}, + {"DLT_JUNIPER_MFR", Const, 0}, + {"DLT_JUNIPER_MLFR", Const, 0}, + {"DLT_JUNIPER_MLPPP", Const, 0}, + {"DLT_JUNIPER_MONITOR", Const, 0}, + {"DLT_JUNIPER_PIC_PEER", Const, 0}, + {"DLT_JUNIPER_PPP", Const, 0}, + {"DLT_JUNIPER_PPPOE", Const, 0}, + {"DLT_JUNIPER_PPPOE_ATM", Const, 0}, + {"DLT_JUNIPER_SERVICES", Const, 0}, + {"DLT_JUNIPER_SRX_E2E", Const, 1}, + {"DLT_JUNIPER_ST", Const, 0}, + {"DLT_JUNIPER_VP", Const, 0}, + {"DLT_JUNIPER_VS", Const, 1}, + {"DLT_LAPB_WITH_DIR", Const, 0}, + {"DLT_LAPD", Const, 0}, + {"DLT_LIN", Const, 0}, + {"DLT_LINUX_EVDEV", Const, 1}, + {"DLT_LINUX_IRDA", Const, 0}, + {"DLT_LINUX_LAPD", Const, 0}, + {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0}, + {"DLT_LINUX_SLL", Const, 0}, + {"DLT_LOOP", Const, 0}, + {"DLT_LTALK", Const, 0}, + {"DLT_MATCHING_MAX", Const, 1}, + {"DLT_MATCHING_MIN", Const, 1}, + {"DLT_MFR", Const, 0}, + {"DLT_MOST", Const, 0}, + {"DLT_MPEG_2_TS", Const, 1}, + {"DLT_MPLS", Const, 1}, + {"DLT_MTP2", Const, 0}, + {"DLT_MTP2_WITH_PHDR", Const, 0}, + {"DLT_MTP3", Const, 0}, + {"DLT_MUX27010", Const, 1}, + {"DLT_NETANALYZER", Const, 1}, + {"DLT_NETANALYZER_TRANSPARENT", Const, 1}, + {"DLT_NFC_LLCP", Const, 1}, + {"DLT_NFLOG", Const, 1}, + {"DLT_NG40", Const, 1}, + {"DLT_NULL", Const, 0}, + {"DLT_PCI_EXP", Const, 0}, + {"DLT_PFLOG", Const, 0}, + {"DLT_PFSYNC", Const, 0}, + {"DLT_PPI", Const, 0}, + {"DLT_PPP", Const, 0}, + {"DLT_PPP_BSDOS", Const, 0}, + {"DLT_PPP_ETHER", Const, 0}, + 
{"DLT_PPP_PPPD", Const, 0}, + {"DLT_PPP_SERIAL", Const, 0}, + {"DLT_PPP_WITH_DIR", Const, 0}, + {"DLT_PPP_WITH_DIRECTION", Const, 0}, + {"DLT_PRISM_HEADER", Const, 0}, + {"DLT_PRONET", Const, 0}, + {"DLT_RAIF1", Const, 0}, + {"DLT_RAW", Const, 0}, + {"DLT_RAWAF_MASK", Const, 1}, + {"DLT_RIO", Const, 0}, + {"DLT_SCCP", Const, 0}, + {"DLT_SITA", Const, 0}, + {"DLT_SLIP", Const, 0}, + {"DLT_SLIP_BSDOS", Const, 0}, + {"DLT_STANAG_5066_D_PDU", Const, 1}, + {"DLT_SUNATM", Const, 0}, + {"DLT_SYMANTEC_FIREWALL", Const, 0}, + {"DLT_TZSP", Const, 0}, + {"DLT_USB", Const, 0}, + {"DLT_USB_LINUX", Const, 0}, + {"DLT_USB_LINUX_MMAPPED", Const, 1}, + {"DLT_USER0", Const, 0}, + {"DLT_USER1", Const, 0}, + {"DLT_USER10", Const, 0}, + {"DLT_USER11", Const, 0}, + {"DLT_USER12", Const, 0}, + {"DLT_USER13", Const, 0}, + {"DLT_USER14", Const, 0}, + {"DLT_USER15", Const, 0}, + {"DLT_USER2", Const, 0}, + {"DLT_USER3", Const, 0}, + {"DLT_USER4", Const, 0}, + {"DLT_USER5", Const, 0}, + {"DLT_USER6", Const, 0}, + {"DLT_USER7", Const, 0}, + {"DLT_USER8", Const, 0}, + {"DLT_USER9", Const, 0}, + {"DLT_WIHART", Const, 1}, + {"DLT_X2E_SERIAL", Const, 0}, + {"DLT_X2E_XORAYA", Const, 0}, + {"DNSMXData", Type, 0}, + {"DNSMXData.NameExchange", Field, 0}, + {"DNSMXData.Pad", Field, 0}, + {"DNSMXData.Preference", Field, 0}, + {"DNSPTRData", Type, 0}, + {"DNSPTRData.Host", Field, 0}, + {"DNSRecord", Type, 0}, + {"DNSRecord.Data", Field, 0}, + {"DNSRecord.Dw", Field, 0}, + {"DNSRecord.Length", Field, 0}, + {"DNSRecord.Name", Field, 0}, + {"DNSRecord.Next", Field, 0}, + {"DNSRecord.Reserved", Field, 0}, + {"DNSRecord.Ttl", Field, 0}, + {"DNSRecord.Type", Field, 0}, + {"DNSSRVData", Type, 0}, + {"DNSSRVData.Pad", Field, 0}, + {"DNSSRVData.Port", Field, 0}, + {"DNSSRVData.Priority", Field, 0}, + {"DNSSRVData.Target", Field, 0}, + {"DNSSRVData.Weight", Field, 0}, + {"DNSTXTData", Type, 0}, + {"DNSTXTData.StringArray", Field, 0}, + {"DNSTXTData.StringCount", Field, 0}, + {"DNS_INFO_NO_RECORDS", Const, 4}, + {"DNS_TYPE_A", Const, 0}, + {"DNS_TYPE_A6", Const, 0}, + {"DNS_TYPE_AAAA", Const, 0}, + {"DNS_TYPE_ADDRS", Const, 0}, + {"DNS_TYPE_AFSDB", Const, 0}, + {"DNS_TYPE_ALL", Const, 0}, + {"DNS_TYPE_ANY", Const, 0}, + {"DNS_TYPE_ATMA", Const, 0}, + {"DNS_TYPE_AXFR", Const, 0}, + {"DNS_TYPE_CERT", Const, 0}, + {"DNS_TYPE_CNAME", Const, 0}, + {"DNS_TYPE_DHCID", Const, 0}, + {"DNS_TYPE_DNAME", Const, 0}, + {"DNS_TYPE_DNSKEY", Const, 0}, + {"DNS_TYPE_DS", Const, 0}, + {"DNS_TYPE_EID", Const, 0}, + {"DNS_TYPE_GID", Const, 0}, + {"DNS_TYPE_GPOS", Const, 0}, + {"DNS_TYPE_HINFO", Const, 0}, + {"DNS_TYPE_ISDN", Const, 0}, + {"DNS_TYPE_IXFR", Const, 0}, + {"DNS_TYPE_KEY", Const, 0}, + {"DNS_TYPE_KX", Const, 0}, + {"DNS_TYPE_LOC", Const, 0}, + {"DNS_TYPE_MAILA", Const, 0}, + {"DNS_TYPE_MAILB", Const, 0}, + {"DNS_TYPE_MB", Const, 0}, + {"DNS_TYPE_MD", Const, 0}, + {"DNS_TYPE_MF", Const, 0}, + {"DNS_TYPE_MG", Const, 0}, + {"DNS_TYPE_MINFO", Const, 0}, + {"DNS_TYPE_MR", Const, 0}, + {"DNS_TYPE_MX", Const, 0}, + {"DNS_TYPE_NAPTR", Const, 0}, + {"DNS_TYPE_NBSTAT", Const, 0}, + {"DNS_TYPE_NIMLOC", Const, 0}, + {"DNS_TYPE_NS", Const, 0}, + {"DNS_TYPE_NSAP", Const, 0}, + {"DNS_TYPE_NSAPPTR", Const, 0}, + {"DNS_TYPE_NSEC", Const, 0}, + {"DNS_TYPE_NULL", Const, 0}, + {"DNS_TYPE_NXT", Const, 0}, + {"DNS_TYPE_OPT", Const, 0}, + {"DNS_TYPE_PTR", Const, 0}, + {"DNS_TYPE_PX", Const, 0}, + {"DNS_TYPE_RP", Const, 0}, + {"DNS_TYPE_RRSIG", Const, 0}, + {"DNS_TYPE_RT", Const, 0}, + {"DNS_TYPE_SIG", Const, 0}, + {"DNS_TYPE_SINK", Const, 0}, + {"DNS_TYPE_SOA", Const, 
0}, + {"DNS_TYPE_SRV", Const, 0}, + {"DNS_TYPE_TEXT", Const, 0}, + {"DNS_TYPE_TKEY", Const, 0}, + {"DNS_TYPE_TSIG", Const, 0}, + {"DNS_TYPE_UID", Const, 0}, + {"DNS_TYPE_UINFO", Const, 0}, + {"DNS_TYPE_UNSPEC", Const, 0}, + {"DNS_TYPE_WINS", Const, 0}, + {"DNS_TYPE_WINSR", Const, 0}, + {"DNS_TYPE_WKS", Const, 0}, + {"DNS_TYPE_X25", Const, 0}, + {"DT_BLK", Const, 0}, + {"DT_CHR", Const, 0}, + {"DT_DIR", Const, 0}, + {"DT_FIFO", Const, 0}, + {"DT_LNK", Const, 0}, + {"DT_REG", Const, 0}, + {"DT_SOCK", Const, 0}, + {"DT_UNKNOWN", Const, 0}, + {"DT_WHT", Const, 0}, + {"DUPLICATE_CLOSE_SOURCE", Const, 0}, + {"DUPLICATE_SAME_ACCESS", Const, 0}, + {"DeleteFile", Func, 0}, + {"DetachLsf", Func, 0}, + {"DeviceIoControl", Func, 4}, + {"Dirent", Type, 0}, + {"Dirent.Fileno", Field, 0}, + {"Dirent.Ino", Field, 0}, + {"Dirent.Name", Field, 0}, + {"Dirent.Namlen", Field, 0}, + {"Dirent.Off", Field, 0}, + {"Dirent.Pad0", Field, 12}, + {"Dirent.Pad1", Field, 12}, + {"Dirent.Pad_cgo_0", Field, 0}, + {"Dirent.Reclen", Field, 0}, + {"Dirent.Seekoff", Field, 0}, + {"Dirent.Type", Field, 0}, + {"Dirent.X__d_padding", Field, 3}, + {"DnsNameCompare", Func, 4}, + {"DnsQuery", Func, 0}, + {"DnsRecordListFree", Func, 0}, + {"DnsSectionAdditional", Const, 4}, + {"DnsSectionAnswer", Const, 4}, + {"DnsSectionAuthority", Const, 4}, + {"DnsSectionQuestion", Const, 4}, + {"Dup", Func, 0}, + {"Dup2", Func, 0}, + {"Dup3", Func, 2}, + {"DuplicateHandle", Func, 0}, + {"E2BIG", Const, 0}, + {"EACCES", Const, 0}, + {"EADDRINUSE", Const, 0}, + {"EADDRNOTAVAIL", Const, 0}, + {"EADV", Const, 0}, + {"EAFNOSUPPORT", Const, 0}, + {"EAGAIN", Const, 0}, + {"EALREADY", Const, 0}, + {"EAUTH", Const, 0}, + {"EBADARCH", Const, 0}, + {"EBADE", Const, 0}, + {"EBADEXEC", Const, 0}, + {"EBADF", Const, 0}, + {"EBADFD", Const, 0}, + {"EBADMACHO", Const, 0}, + {"EBADMSG", Const, 0}, + {"EBADR", Const, 0}, + {"EBADRPC", Const, 0}, + {"EBADRQC", Const, 0}, + {"EBADSLT", Const, 0}, + {"EBFONT", Const, 0}, + {"EBUSY", Const, 0}, + {"ECANCELED", Const, 0}, + {"ECAPMODE", Const, 1}, + {"ECHILD", Const, 0}, + {"ECHO", Const, 0}, + {"ECHOCTL", Const, 0}, + {"ECHOE", Const, 0}, + {"ECHOK", Const, 0}, + {"ECHOKE", Const, 0}, + {"ECHONL", Const, 0}, + {"ECHOPRT", Const, 0}, + {"ECHRNG", Const, 0}, + {"ECOMM", Const, 0}, + {"ECONNABORTED", Const, 0}, + {"ECONNREFUSED", Const, 0}, + {"ECONNRESET", Const, 0}, + {"EDEADLK", Const, 0}, + {"EDEADLOCK", Const, 0}, + {"EDESTADDRREQ", Const, 0}, + {"EDEVERR", Const, 0}, + {"EDOM", Const, 0}, + {"EDOOFUS", Const, 0}, + {"EDOTDOT", Const, 0}, + {"EDQUOT", Const, 0}, + {"EEXIST", Const, 0}, + {"EFAULT", Const, 0}, + {"EFBIG", Const, 0}, + {"EFER_LMA", Const, 1}, + {"EFER_LME", Const, 1}, + {"EFER_NXE", Const, 1}, + {"EFER_SCE", Const, 1}, + {"EFTYPE", Const, 0}, + {"EHOSTDOWN", Const, 0}, + {"EHOSTUNREACH", Const, 0}, + {"EHWPOISON", Const, 0}, + {"EIDRM", Const, 0}, + {"EILSEQ", Const, 0}, + {"EINPROGRESS", Const, 0}, + {"EINTR", Const, 0}, + {"EINVAL", Const, 0}, + {"EIO", Const, 0}, + {"EIPSEC", Const, 1}, + {"EISCONN", Const, 0}, + {"EISDIR", Const, 0}, + {"EISNAM", Const, 0}, + {"EKEYEXPIRED", Const, 0}, + {"EKEYREJECTED", Const, 0}, + {"EKEYREVOKED", Const, 0}, + {"EL2HLT", Const, 0}, + {"EL2NSYNC", Const, 0}, + {"EL3HLT", Const, 0}, + {"EL3RST", Const, 0}, + {"ELAST", Const, 0}, + {"ELF_NGREG", Const, 0}, + {"ELF_PRARGSZ", Const, 0}, + {"ELIBACC", Const, 0}, + {"ELIBBAD", Const, 0}, + {"ELIBEXEC", Const, 0}, + {"ELIBMAX", Const, 0}, + {"ELIBSCN", Const, 0}, + {"ELNRNG", Const, 0}, + {"ELOOP", Const, 0}, + 
{"EMEDIUMTYPE", Const, 0}, + {"EMFILE", Const, 0}, + {"EMLINK", Const, 0}, + {"EMSGSIZE", Const, 0}, + {"EMT_TAGOVF", Const, 1}, + {"EMULTIHOP", Const, 0}, + {"EMUL_ENABLED", Const, 1}, + {"EMUL_LINUX", Const, 1}, + {"EMUL_LINUX32", Const, 1}, + {"EMUL_MAXID", Const, 1}, + {"EMUL_NATIVE", Const, 1}, + {"ENAMETOOLONG", Const, 0}, + {"ENAVAIL", Const, 0}, + {"ENDRUNDISC", Const, 1}, + {"ENEEDAUTH", Const, 0}, + {"ENETDOWN", Const, 0}, + {"ENETRESET", Const, 0}, + {"ENETUNREACH", Const, 0}, + {"ENFILE", Const, 0}, + {"ENOANO", Const, 0}, + {"ENOATTR", Const, 0}, + {"ENOBUFS", Const, 0}, + {"ENOCSI", Const, 0}, + {"ENODATA", Const, 0}, + {"ENODEV", Const, 0}, + {"ENOENT", Const, 0}, + {"ENOEXEC", Const, 0}, + {"ENOKEY", Const, 0}, + {"ENOLCK", Const, 0}, + {"ENOLINK", Const, 0}, + {"ENOMEDIUM", Const, 0}, + {"ENOMEM", Const, 0}, + {"ENOMSG", Const, 0}, + {"ENONET", Const, 0}, + {"ENOPKG", Const, 0}, + {"ENOPOLICY", Const, 0}, + {"ENOPROTOOPT", Const, 0}, + {"ENOSPC", Const, 0}, + {"ENOSR", Const, 0}, + {"ENOSTR", Const, 0}, + {"ENOSYS", Const, 0}, + {"ENOTBLK", Const, 0}, + {"ENOTCAPABLE", Const, 0}, + {"ENOTCONN", Const, 0}, + {"ENOTDIR", Const, 0}, + {"ENOTEMPTY", Const, 0}, + {"ENOTNAM", Const, 0}, + {"ENOTRECOVERABLE", Const, 0}, + {"ENOTSOCK", Const, 0}, + {"ENOTSUP", Const, 0}, + {"ENOTTY", Const, 0}, + {"ENOTUNIQ", Const, 0}, + {"ENXIO", Const, 0}, + {"EN_SW_CTL_INF", Const, 1}, + {"EN_SW_CTL_PREC", Const, 1}, + {"EN_SW_CTL_ROUND", Const, 1}, + {"EN_SW_DATACHAIN", Const, 1}, + {"EN_SW_DENORM", Const, 1}, + {"EN_SW_INVOP", Const, 1}, + {"EN_SW_OVERFLOW", Const, 1}, + {"EN_SW_PRECLOSS", Const, 1}, + {"EN_SW_UNDERFLOW", Const, 1}, + {"EN_SW_ZERODIV", Const, 1}, + {"EOPNOTSUPP", Const, 0}, + {"EOVERFLOW", Const, 0}, + {"EOWNERDEAD", Const, 0}, + {"EPERM", Const, 0}, + {"EPFNOSUPPORT", Const, 0}, + {"EPIPE", Const, 0}, + {"EPOLLERR", Const, 0}, + {"EPOLLET", Const, 0}, + {"EPOLLHUP", Const, 0}, + {"EPOLLIN", Const, 0}, + {"EPOLLMSG", Const, 0}, + {"EPOLLONESHOT", Const, 0}, + {"EPOLLOUT", Const, 0}, + {"EPOLLPRI", Const, 0}, + {"EPOLLRDBAND", Const, 0}, + {"EPOLLRDHUP", Const, 0}, + {"EPOLLRDNORM", Const, 0}, + {"EPOLLWRBAND", Const, 0}, + {"EPOLLWRNORM", Const, 0}, + {"EPOLL_CLOEXEC", Const, 0}, + {"EPOLL_CTL_ADD", Const, 0}, + {"EPOLL_CTL_DEL", Const, 0}, + {"EPOLL_CTL_MOD", Const, 0}, + {"EPOLL_NONBLOCK", Const, 0}, + {"EPROCLIM", Const, 0}, + {"EPROCUNAVAIL", Const, 0}, + {"EPROGMISMATCH", Const, 0}, + {"EPROGUNAVAIL", Const, 0}, + {"EPROTO", Const, 0}, + {"EPROTONOSUPPORT", Const, 0}, + {"EPROTOTYPE", Const, 0}, + {"EPWROFF", Const, 0}, + {"EQFULL", Const, 16}, + {"ERANGE", Const, 0}, + {"EREMCHG", Const, 0}, + {"EREMOTE", Const, 0}, + {"EREMOTEIO", Const, 0}, + {"ERESTART", Const, 0}, + {"ERFKILL", Const, 0}, + {"EROFS", Const, 0}, + {"ERPCMISMATCH", Const, 0}, + {"ERROR_ACCESS_DENIED", Const, 0}, + {"ERROR_ALREADY_EXISTS", Const, 0}, + {"ERROR_BROKEN_PIPE", Const, 0}, + {"ERROR_BUFFER_OVERFLOW", Const, 0}, + {"ERROR_DIR_NOT_EMPTY", Const, 8}, + {"ERROR_ENVVAR_NOT_FOUND", Const, 0}, + {"ERROR_FILE_EXISTS", Const, 0}, + {"ERROR_FILE_NOT_FOUND", Const, 0}, + {"ERROR_HANDLE_EOF", Const, 2}, + {"ERROR_INSUFFICIENT_BUFFER", Const, 0}, + {"ERROR_IO_PENDING", Const, 0}, + {"ERROR_MOD_NOT_FOUND", Const, 0}, + {"ERROR_MORE_DATA", Const, 3}, + {"ERROR_NETNAME_DELETED", Const, 3}, + {"ERROR_NOT_FOUND", Const, 1}, + {"ERROR_NO_MORE_FILES", Const, 0}, + {"ERROR_OPERATION_ABORTED", Const, 0}, + {"ERROR_PATH_NOT_FOUND", Const, 0}, + {"ERROR_PRIVILEGE_NOT_HELD", Const, 4}, + {"ERROR_PROC_NOT_FOUND", 
Const, 0}, + {"ESHLIBVERS", Const, 0}, + {"ESHUTDOWN", Const, 0}, + {"ESOCKTNOSUPPORT", Const, 0}, + {"ESPIPE", Const, 0}, + {"ESRCH", Const, 0}, + {"ESRMNT", Const, 0}, + {"ESTALE", Const, 0}, + {"ESTRPIPE", Const, 0}, + {"ETHERCAP_JUMBO_MTU", Const, 1}, + {"ETHERCAP_VLAN_HWTAGGING", Const, 1}, + {"ETHERCAP_VLAN_MTU", Const, 1}, + {"ETHERMIN", Const, 1}, + {"ETHERMTU", Const, 1}, + {"ETHERMTU_JUMBO", Const, 1}, + {"ETHERTYPE_8023", Const, 1}, + {"ETHERTYPE_AARP", Const, 1}, + {"ETHERTYPE_ACCTON", Const, 1}, + {"ETHERTYPE_AEONIC", Const, 1}, + {"ETHERTYPE_ALPHA", Const, 1}, + {"ETHERTYPE_AMBER", Const, 1}, + {"ETHERTYPE_AMOEBA", Const, 1}, + {"ETHERTYPE_AOE", Const, 1}, + {"ETHERTYPE_APOLLO", Const, 1}, + {"ETHERTYPE_APOLLODOMAIN", Const, 1}, + {"ETHERTYPE_APPLETALK", Const, 1}, + {"ETHERTYPE_APPLITEK", Const, 1}, + {"ETHERTYPE_ARGONAUT", Const, 1}, + {"ETHERTYPE_ARP", Const, 1}, + {"ETHERTYPE_AT", Const, 1}, + {"ETHERTYPE_ATALK", Const, 1}, + {"ETHERTYPE_ATOMIC", Const, 1}, + {"ETHERTYPE_ATT", Const, 1}, + {"ETHERTYPE_ATTSTANFORD", Const, 1}, + {"ETHERTYPE_AUTOPHON", Const, 1}, + {"ETHERTYPE_AXIS", Const, 1}, + {"ETHERTYPE_BCLOOP", Const, 1}, + {"ETHERTYPE_BOFL", Const, 1}, + {"ETHERTYPE_CABLETRON", Const, 1}, + {"ETHERTYPE_CHAOS", Const, 1}, + {"ETHERTYPE_COMDESIGN", Const, 1}, + {"ETHERTYPE_COMPUGRAPHIC", Const, 1}, + {"ETHERTYPE_COUNTERPOINT", Const, 1}, + {"ETHERTYPE_CRONUS", Const, 1}, + {"ETHERTYPE_CRONUSVLN", Const, 1}, + {"ETHERTYPE_DCA", Const, 1}, + {"ETHERTYPE_DDE", Const, 1}, + {"ETHERTYPE_DEBNI", Const, 1}, + {"ETHERTYPE_DECAM", Const, 1}, + {"ETHERTYPE_DECCUST", Const, 1}, + {"ETHERTYPE_DECDIAG", Const, 1}, + {"ETHERTYPE_DECDNS", Const, 1}, + {"ETHERTYPE_DECDTS", Const, 1}, + {"ETHERTYPE_DECEXPER", Const, 1}, + {"ETHERTYPE_DECLAST", Const, 1}, + {"ETHERTYPE_DECLTM", Const, 1}, + {"ETHERTYPE_DECMUMPS", Const, 1}, + {"ETHERTYPE_DECNETBIOS", Const, 1}, + {"ETHERTYPE_DELTACON", Const, 1}, + {"ETHERTYPE_DIDDLE", Const, 1}, + {"ETHERTYPE_DLOG1", Const, 1}, + {"ETHERTYPE_DLOG2", Const, 1}, + {"ETHERTYPE_DN", Const, 1}, + {"ETHERTYPE_DOGFIGHT", Const, 1}, + {"ETHERTYPE_DSMD", Const, 1}, + {"ETHERTYPE_ECMA", Const, 1}, + {"ETHERTYPE_ENCRYPT", Const, 1}, + {"ETHERTYPE_ES", Const, 1}, + {"ETHERTYPE_EXCELAN", Const, 1}, + {"ETHERTYPE_EXPERDATA", Const, 1}, + {"ETHERTYPE_FLIP", Const, 1}, + {"ETHERTYPE_FLOWCONTROL", Const, 1}, + {"ETHERTYPE_FRARP", Const, 1}, + {"ETHERTYPE_GENDYN", Const, 1}, + {"ETHERTYPE_HAYES", Const, 1}, + {"ETHERTYPE_HIPPI_FP", Const, 1}, + {"ETHERTYPE_HITACHI", Const, 1}, + {"ETHERTYPE_HP", Const, 1}, + {"ETHERTYPE_IEEEPUP", Const, 1}, + {"ETHERTYPE_IEEEPUPAT", Const, 1}, + {"ETHERTYPE_IMLBL", Const, 1}, + {"ETHERTYPE_IMLBLDIAG", Const, 1}, + {"ETHERTYPE_IP", Const, 1}, + {"ETHERTYPE_IPAS", Const, 1}, + {"ETHERTYPE_IPV6", Const, 1}, + {"ETHERTYPE_IPX", Const, 1}, + {"ETHERTYPE_IPXNEW", Const, 1}, + {"ETHERTYPE_KALPANA", Const, 1}, + {"ETHERTYPE_LANBRIDGE", Const, 1}, + {"ETHERTYPE_LANPROBE", Const, 1}, + {"ETHERTYPE_LAT", Const, 1}, + {"ETHERTYPE_LBACK", Const, 1}, + {"ETHERTYPE_LITTLE", Const, 1}, + {"ETHERTYPE_LLDP", Const, 1}, + {"ETHERTYPE_LOGICRAFT", Const, 1}, + {"ETHERTYPE_LOOPBACK", Const, 1}, + {"ETHERTYPE_MATRA", Const, 1}, + {"ETHERTYPE_MAX", Const, 1}, + {"ETHERTYPE_MERIT", Const, 1}, + {"ETHERTYPE_MICP", Const, 1}, + {"ETHERTYPE_MOPDL", Const, 1}, + {"ETHERTYPE_MOPRC", Const, 1}, + {"ETHERTYPE_MOTOROLA", Const, 1}, + {"ETHERTYPE_MPLS", Const, 1}, + {"ETHERTYPE_MPLS_MCAST", Const, 1}, + {"ETHERTYPE_MUMPS", Const, 1}, + {"ETHERTYPE_NBPCC", Const, 1}, + 
{"ETHERTYPE_NBPCLAIM", Const, 1}, + {"ETHERTYPE_NBPCLREQ", Const, 1}, + {"ETHERTYPE_NBPCLRSP", Const, 1}, + {"ETHERTYPE_NBPCREQ", Const, 1}, + {"ETHERTYPE_NBPCRSP", Const, 1}, + {"ETHERTYPE_NBPDG", Const, 1}, + {"ETHERTYPE_NBPDGB", Const, 1}, + {"ETHERTYPE_NBPDLTE", Const, 1}, + {"ETHERTYPE_NBPRAR", Const, 1}, + {"ETHERTYPE_NBPRAS", Const, 1}, + {"ETHERTYPE_NBPRST", Const, 1}, + {"ETHERTYPE_NBPSCD", Const, 1}, + {"ETHERTYPE_NBPVCD", Const, 1}, + {"ETHERTYPE_NBS", Const, 1}, + {"ETHERTYPE_NCD", Const, 1}, + {"ETHERTYPE_NESTAR", Const, 1}, + {"ETHERTYPE_NETBEUI", Const, 1}, + {"ETHERTYPE_NOVELL", Const, 1}, + {"ETHERTYPE_NS", Const, 1}, + {"ETHERTYPE_NSAT", Const, 1}, + {"ETHERTYPE_NSCOMPAT", Const, 1}, + {"ETHERTYPE_NTRAILER", Const, 1}, + {"ETHERTYPE_OS9", Const, 1}, + {"ETHERTYPE_OS9NET", Const, 1}, + {"ETHERTYPE_PACER", Const, 1}, + {"ETHERTYPE_PAE", Const, 1}, + {"ETHERTYPE_PCS", Const, 1}, + {"ETHERTYPE_PLANNING", Const, 1}, + {"ETHERTYPE_PPP", Const, 1}, + {"ETHERTYPE_PPPOE", Const, 1}, + {"ETHERTYPE_PPPOEDISC", Const, 1}, + {"ETHERTYPE_PRIMENTS", Const, 1}, + {"ETHERTYPE_PUP", Const, 1}, + {"ETHERTYPE_PUPAT", Const, 1}, + {"ETHERTYPE_QINQ", Const, 1}, + {"ETHERTYPE_RACAL", Const, 1}, + {"ETHERTYPE_RATIONAL", Const, 1}, + {"ETHERTYPE_RAWFR", Const, 1}, + {"ETHERTYPE_RCL", Const, 1}, + {"ETHERTYPE_RDP", Const, 1}, + {"ETHERTYPE_RETIX", Const, 1}, + {"ETHERTYPE_REVARP", Const, 1}, + {"ETHERTYPE_SCA", Const, 1}, + {"ETHERTYPE_SECTRA", Const, 1}, + {"ETHERTYPE_SECUREDATA", Const, 1}, + {"ETHERTYPE_SGITW", Const, 1}, + {"ETHERTYPE_SG_BOUNCE", Const, 1}, + {"ETHERTYPE_SG_DIAG", Const, 1}, + {"ETHERTYPE_SG_NETGAMES", Const, 1}, + {"ETHERTYPE_SG_RESV", Const, 1}, + {"ETHERTYPE_SIMNET", Const, 1}, + {"ETHERTYPE_SLOW", Const, 1}, + {"ETHERTYPE_SLOWPROTOCOLS", Const, 1}, + {"ETHERTYPE_SNA", Const, 1}, + {"ETHERTYPE_SNMP", Const, 1}, + {"ETHERTYPE_SONIX", Const, 1}, + {"ETHERTYPE_SPIDER", Const, 1}, + {"ETHERTYPE_SPRITE", Const, 1}, + {"ETHERTYPE_STP", Const, 1}, + {"ETHERTYPE_TALARIS", Const, 1}, + {"ETHERTYPE_TALARISMC", Const, 1}, + {"ETHERTYPE_TCPCOMP", Const, 1}, + {"ETHERTYPE_TCPSM", Const, 1}, + {"ETHERTYPE_TEC", Const, 1}, + {"ETHERTYPE_TIGAN", Const, 1}, + {"ETHERTYPE_TRAIL", Const, 1}, + {"ETHERTYPE_TRANSETHER", Const, 1}, + {"ETHERTYPE_TYMSHARE", Const, 1}, + {"ETHERTYPE_UBBST", Const, 1}, + {"ETHERTYPE_UBDEBUG", Const, 1}, + {"ETHERTYPE_UBDIAGLOOP", Const, 1}, + {"ETHERTYPE_UBDL", Const, 1}, + {"ETHERTYPE_UBNIU", Const, 1}, + {"ETHERTYPE_UBNMC", Const, 1}, + {"ETHERTYPE_VALID", Const, 1}, + {"ETHERTYPE_VARIAN", Const, 1}, + {"ETHERTYPE_VAXELN", Const, 1}, + {"ETHERTYPE_VEECO", Const, 1}, + {"ETHERTYPE_VEXP", Const, 1}, + {"ETHERTYPE_VGLAB", Const, 1}, + {"ETHERTYPE_VINES", Const, 1}, + {"ETHERTYPE_VINESECHO", Const, 1}, + {"ETHERTYPE_VINESLOOP", Const, 1}, + {"ETHERTYPE_VITAL", Const, 1}, + {"ETHERTYPE_VLAN", Const, 1}, + {"ETHERTYPE_VLTLMAN", Const, 1}, + {"ETHERTYPE_VPROD", Const, 1}, + {"ETHERTYPE_VURESERVED", Const, 1}, + {"ETHERTYPE_WATERLOO", Const, 1}, + {"ETHERTYPE_WELLFLEET", Const, 1}, + {"ETHERTYPE_X25", Const, 1}, + {"ETHERTYPE_X75", Const, 1}, + {"ETHERTYPE_XNSSM", Const, 1}, + {"ETHERTYPE_XTP", Const, 1}, + {"ETHER_ADDR_LEN", Const, 1}, + {"ETHER_ALIGN", Const, 1}, + {"ETHER_CRC_LEN", Const, 1}, + {"ETHER_CRC_POLY_BE", Const, 1}, + {"ETHER_CRC_POLY_LE", Const, 1}, + {"ETHER_HDR_LEN", Const, 1}, + {"ETHER_MAX_DIX_LEN", Const, 1}, + {"ETHER_MAX_LEN", Const, 1}, + {"ETHER_MAX_LEN_JUMBO", Const, 1}, + {"ETHER_MIN_LEN", Const, 1}, + {"ETHER_PPPOE_ENCAP_LEN", Const, 1}, + 
{"ETHER_TYPE_LEN", Const, 1}, + {"ETHER_VLAN_ENCAP_LEN", Const, 1}, + {"ETH_P_1588", Const, 0}, + {"ETH_P_8021Q", Const, 0}, + {"ETH_P_802_2", Const, 0}, + {"ETH_P_802_3", Const, 0}, + {"ETH_P_AARP", Const, 0}, + {"ETH_P_ALL", Const, 0}, + {"ETH_P_AOE", Const, 0}, + {"ETH_P_ARCNET", Const, 0}, + {"ETH_P_ARP", Const, 0}, + {"ETH_P_ATALK", Const, 0}, + {"ETH_P_ATMFATE", Const, 0}, + {"ETH_P_ATMMPOA", Const, 0}, + {"ETH_P_AX25", Const, 0}, + {"ETH_P_BPQ", Const, 0}, + {"ETH_P_CAIF", Const, 0}, + {"ETH_P_CAN", Const, 0}, + {"ETH_P_CONTROL", Const, 0}, + {"ETH_P_CUST", Const, 0}, + {"ETH_P_DDCMP", Const, 0}, + {"ETH_P_DEC", Const, 0}, + {"ETH_P_DIAG", Const, 0}, + {"ETH_P_DNA_DL", Const, 0}, + {"ETH_P_DNA_RC", Const, 0}, + {"ETH_P_DNA_RT", Const, 0}, + {"ETH_P_DSA", Const, 0}, + {"ETH_P_ECONET", Const, 0}, + {"ETH_P_EDSA", Const, 0}, + {"ETH_P_FCOE", Const, 0}, + {"ETH_P_FIP", Const, 0}, + {"ETH_P_HDLC", Const, 0}, + {"ETH_P_IEEE802154", Const, 0}, + {"ETH_P_IEEEPUP", Const, 0}, + {"ETH_P_IEEEPUPAT", Const, 0}, + {"ETH_P_IP", Const, 0}, + {"ETH_P_IPV6", Const, 0}, + {"ETH_P_IPX", Const, 0}, + {"ETH_P_IRDA", Const, 0}, + {"ETH_P_LAT", Const, 0}, + {"ETH_P_LINK_CTL", Const, 0}, + {"ETH_P_LOCALTALK", Const, 0}, + {"ETH_P_LOOP", Const, 0}, + {"ETH_P_MOBITEX", Const, 0}, + {"ETH_P_MPLS_MC", Const, 0}, + {"ETH_P_MPLS_UC", Const, 0}, + {"ETH_P_PAE", Const, 0}, + {"ETH_P_PAUSE", Const, 0}, + {"ETH_P_PHONET", Const, 0}, + {"ETH_P_PPPTALK", Const, 0}, + {"ETH_P_PPP_DISC", Const, 0}, + {"ETH_P_PPP_MP", Const, 0}, + {"ETH_P_PPP_SES", Const, 0}, + {"ETH_P_PUP", Const, 0}, + {"ETH_P_PUPAT", Const, 0}, + {"ETH_P_RARP", Const, 0}, + {"ETH_P_SCA", Const, 0}, + {"ETH_P_SLOW", Const, 0}, + {"ETH_P_SNAP", Const, 0}, + {"ETH_P_TEB", Const, 0}, + {"ETH_P_TIPC", Const, 0}, + {"ETH_P_TRAILER", Const, 0}, + {"ETH_P_TR_802_2", Const, 0}, + {"ETH_P_WAN_PPP", Const, 0}, + {"ETH_P_WCCP", Const, 0}, + {"ETH_P_X25", Const, 0}, + {"ETIME", Const, 0}, + {"ETIMEDOUT", Const, 0}, + {"ETOOMANYREFS", Const, 0}, + {"ETXTBSY", Const, 0}, + {"EUCLEAN", Const, 0}, + {"EUNATCH", Const, 0}, + {"EUSERS", Const, 0}, + {"EVFILT_AIO", Const, 0}, + {"EVFILT_FS", Const, 0}, + {"EVFILT_LIO", Const, 0}, + {"EVFILT_MACHPORT", Const, 0}, + {"EVFILT_PROC", Const, 0}, + {"EVFILT_READ", Const, 0}, + {"EVFILT_SIGNAL", Const, 0}, + {"EVFILT_SYSCOUNT", Const, 0}, + {"EVFILT_THREADMARKER", Const, 0}, + {"EVFILT_TIMER", Const, 0}, + {"EVFILT_USER", Const, 0}, + {"EVFILT_VM", Const, 0}, + {"EVFILT_VNODE", Const, 0}, + {"EVFILT_WRITE", Const, 0}, + {"EV_ADD", Const, 0}, + {"EV_CLEAR", Const, 0}, + {"EV_DELETE", Const, 0}, + {"EV_DISABLE", Const, 0}, + {"EV_DISPATCH", Const, 0}, + {"EV_DROP", Const, 3}, + {"EV_ENABLE", Const, 0}, + {"EV_EOF", Const, 0}, + {"EV_ERROR", Const, 0}, + {"EV_FLAG0", Const, 0}, + {"EV_FLAG1", Const, 0}, + {"EV_ONESHOT", Const, 0}, + {"EV_OOBAND", Const, 0}, + {"EV_POLL", Const, 0}, + {"EV_RECEIPT", Const, 0}, + {"EV_SYSFLAGS", Const, 0}, + {"EWINDOWS", Const, 0}, + {"EWOULDBLOCK", Const, 0}, + {"EXDEV", Const, 0}, + {"EXFULL", Const, 0}, + {"EXTA", Const, 0}, + {"EXTB", Const, 0}, + {"EXTPROC", Const, 0}, + {"Environ", Func, 0}, + {"EpollCreate", Func, 0}, + {"EpollCreate1", Func, 0}, + {"EpollCtl", Func, 0}, + {"EpollEvent", Type, 0}, + {"EpollEvent.Events", Field, 0}, + {"EpollEvent.Fd", Field, 0}, + {"EpollEvent.Pad", Field, 0}, + {"EpollEvent.PadFd", Field, 0}, + {"EpollWait", Func, 0}, + {"Errno", Type, 0}, + {"EscapeArg", Func, 0}, + {"Exchangedata", Func, 0}, + {"Exec", Func, 0}, + {"Exit", Func, 0}, + {"ExitProcess", Func, 
0}, + {"FD_CLOEXEC", Const, 0}, + {"FD_SETSIZE", Const, 0}, + {"FILE_ACTION_ADDED", Const, 0}, + {"FILE_ACTION_MODIFIED", Const, 0}, + {"FILE_ACTION_REMOVED", Const, 0}, + {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0}, + {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0}, + {"FILE_APPEND_DATA", Const, 0}, + {"FILE_ATTRIBUTE_ARCHIVE", Const, 0}, + {"FILE_ATTRIBUTE_DIRECTORY", Const, 0}, + {"FILE_ATTRIBUTE_HIDDEN", Const, 0}, + {"FILE_ATTRIBUTE_NORMAL", Const, 0}, + {"FILE_ATTRIBUTE_READONLY", Const, 0}, + {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4}, + {"FILE_ATTRIBUTE_SYSTEM", Const, 0}, + {"FILE_BEGIN", Const, 0}, + {"FILE_CURRENT", Const, 0}, + {"FILE_END", Const, 0}, + {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0}, + {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4}, + {"FILE_FLAG_OVERLAPPED", Const, 0}, + {"FILE_LIST_DIRECTORY", Const, 0}, + {"FILE_MAP_COPY", Const, 0}, + {"FILE_MAP_EXECUTE", Const, 0}, + {"FILE_MAP_READ", Const, 0}, + {"FILE_MAP_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0}, + {"FILE_NOTIFY_CHANGE_CREATION", Const, 0}, + {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_SIZE", Const, 0}, + {"FILE_SHARE_DELETE", Const, 0}, + {"FILE_SHARE_READ", Const, 0}, + {"FILE_SHARE_WRITE", Const, 0}, + {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2}, + {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2}, + {"FILE_TYPE_CHAR", Const, 0}, + {"FILE_TYPE_DISK", Const, 0}, + {"FILE_TYPE_PIPE", Const, 0}, + {"FILE_TYPE_REMOTE", Const, 0}, + {"FILE_TYPE_UNKNOWN", Const, 0}, + {"FILE_WRITE_ATTRIBUTES", Const, 0}, + {"FLUSHO", Const, 0}, + {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0}, + {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0}, + {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0}, + {"FORMAT_MESSAGE_FROM_STRING", Const, 0}, + {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0}, + {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0}, + {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0}, + {"FSCTL_GET_REPARSE_POINT", Const, 4}, + {"F_ADDFILESIGS", Const, 0}, + {"F_ADDSIGS", Const, 0}, + {"F_ALLOCATEALL", Const, 0}, + {"F_ALLOCATECONTIG", Const, 0}, + {"F_CANCEL", Const, 0}, + {"F_CHKCLEAN", Const, 0}, + {"F_CLOSEM", Const, 1}, + {"F_DUP2FD", Const, 0}, + {"F_DUP2FD_CLOEXEC", Const, 1}, + {"F_DUPFD", Const, 0}, + {"F_DUPFD_CLOEXEC", Const, 0}, + {"F_EXLCK", Const, 0}, + {"F_FINDSIGS", Const, 16}, + {"F_FLUSH_DATA", Const, 0}, + {"F_FREEZE_FS", Const, 0}, + {"F_FSCTL", Const, 1}, + {"F_FSDIRMASK", Const, 1}, + {"F_FSIN", Const, 1}, + {"F_FSINOUT", Const, 1}, + {"F_FSOUT", Const, 1}, + {"F_FSPRIV", Const, 1}, + {"F_FSVOID", Const, 1}, + {"F_FULLFSYNC", Const, 0}, + {"F_GETCODEDIR", Const, 16}, + {"F_GETFD", Const, 0}, + {"F_GETFL", Const, 0}, + {"F_GETLEASE", Const, 0}, + {"F_GETLK", Const, 0}, + {"F_GETLK64", Const, 0}, + {"F_GETLKPID", Const, 0}, + {"F_GETNOSIGPIPE", Const, 0}, + {"F_GETOWN", Const, 0}, + {"F_GETOWN_EX", Const, 0}, + {"F_GETPATH", Const, 0}, + {"F_GETPATH_MTMINFO", Const, 0}, + {"F_GETPIPE_SZ", Const, 0}, + {"F_GETPROTECTIONCLASS", Const, 0}, + {"F_GETPROTECTIONLEVEL", Const, 16}, + {"F_GETSIG", Const, 0}, + {"F_GLOBAL_NOCACHE", Const, 0}, + {"F_LOCK", Const, 0}, + {"F_LOG2PHYS", Const, 0}, + {"F_LOG2PHYS_EXT", Const, 0}, + {"F_MARKDEPENDENCY", Const, 0}, + {"F_MAXFD", Const, 1}, + {"F_NOCACHE", Const, 0}, + {"F_NODIRECT", Const, 0}, + {"F_NOTIFY", Const, 0}, + {"F_OGETLK", Const, 0}, + {"F_OK", Const, 0}, + {"F_OSETLK", Const, 0}, + {"F_OSETLKW", Const, 0}, + 
{"F_PARAM_MASK", Const, 1}, + {"F_PARAM_MAX", Const, 1}, + {"F_PATHPKG_CHECK", Const, 0}, + {"F_PEOFPOSMODE", Const, 0}, + {"F_PREALLOCATE", Const, 0}, + {"F_RDADVISE", Const, 0}, + {"F_RDAHEAD", Const, 0}, + {"F_RDLCK", Const, 0}, + {"F_READAHEAD", Const, 0}, + {"F_READBOOTSTRAP", Const, 0}, + {"F_SETBACKINGSTORE", Const, 0}, + {"F_SETFD", Const, 0}, + {"F_SETFL", Const, 0}, + {"F_SETLEASE", Const, 0}, + {"F_SETLK", Const, 0}, + {"F_SETLK64", Const, 0}, + {"F_SETLKW", Const, 0}, + {"F_SETLKW64", Const, 0}, + {"F_SETLKWTIMEOUT", Const, 16}, + {"F_SETLK_REMOTE", Const, 0}, + {"F_SETNOSIGPIPE", Const, 0}, + {"F_SETOWN", Const, 0}, + {"F_SETOWN_EX", Const, 0}, + {"F_SETPIPE_SZ", Const, 0}, + {"F_SETPROTECTIONCLASS", Const, 0}, + {"F_SETSIG", Const, 0}, + {"F_SETSIZE", Const, 0}, + {"F_SHLCK", Const, 0}, + {"F_SINGLE_WRITER", Const, 16}, + {"F_TEST", Const, 0}, + {"F_THAW_FS", Const, 0}, + {"F_TLOCK", Const, 0}, + {"F_TRANSCODEKEY", Const, 16}, + {"F_ULOCK", Const, 0}, + {"F_UNLCK", Const, 0}, + {"F_UNLCKSYS", Const, 0}, + {"F_VOLPOSMODE", Const, 0}, + {"F_WRITEBOOTSTRAP", Const, 0}, + {"F_WRLCK", Const, 0}, + {"Faccessat", Func, 0}, + {"Fallocate", Func, 0}, + {"Fbootstraptransfer_t", Type, 0}, + {"Fbootstraptransfer_t.Buffer", Field, 0}, + {"Fbootstraptransfer_t.Length", Field, 0}, + {"Fbootstraptransfer_t.Offset", Field, 0}, + {"Fchdir", Func, 0}, + {"Fchflags", Func, 0}, + {"Fchmod", Func, 0}, + {"Fchmodat", Func, 0}, + {"Fchown", Func, 0}, + {"Fchownat", Func, 0}, + {"FcntlFlock", Func, 3}, + {"FdSet", Type, 0}, + {"FdSet.Bits", Field, 0}, + {"FdSet.X__fds_bits", Field, 0}, + {"Fdatasync", Func, 0}, + {"FileNotifyInformation", Type, 0}, + {"FileNotifyInformation.Action", Field, 0}, + {"FileNotifyInformation.FileName", Field, 0}, + {"FileNotifyInformation.FileNameLength", Field, 0}, + {"FileNotifyInformation.NextEntryOffset", Field, 0}, + {"Filetime", Type, 0}, + {"Filetime.HighDateTime", Field, 0}, + {"Filetime.LowDateTime", Field, 0}, + {"FindClose", Func, 0}, + {"FindFirstFile", Func, 0}, + {"FindNextFile", Func, 0}, + {"Flock", Func, 0}, + {"Flock_t", Type, 0}, + {"Flock_t.Len", Field, 0}, + {"Flock_t.Pad_cgo_0", Field, 0}, + {"Flock_t.Pad_cgo_1", Field, 3}, + {"Flock_t.Pid", Field, 0}, + {"Flock_t.Start", Field, 0}, + {"Flock_t.Sysid", Field, 0}, + {"Flock_t.Type", Field, 0}, + {"Flock_t.Whence", Field, 0}, + {"FlushBpf", Func, 0}, + {"FlushFileBuffers", Func, 0}, + {"FlushViewOfFile", Func, 0}, + {"ForkExec", Func, 0}, + {"ForkLock", Var, 0}, + {"FormatMessage", Func, 0}, + {"Fpathconf", Func, 0}, + {"FreeAddrInfoW", Func, 1}, + {"FreeEnvironmentStrings", Func, 0}, + {"FreeLibrary", Func, 0}, + {"Fsid", Type, 0}, + {"Fsid.Val", Field, 0}, + {"Fsid.X__fsid_val", Field, 2}, + {"Fsid.X__val", Field, 0}, + {"Fstat", Func, 0}, + {"Fstatat", Func, 12}, + {"Fstatfs", Func, 0}, + {"Fstore_t", Type, 0}, + {"Fstore_t.Bytesalloc", Field, 0}, + {"Fstore_t.Flags", Field, 0}, + {"Fstore_t.Length", Field, 0}, + {"Fstore_t.Offset", Field, 0}, + {"Fstore_t.Posmode", Field, 0}, + {"Fsync", Func, 0}, + {"Ftruncate", Func, 0}, + {"FullPath", Func, 4}, + {"Futimes", Func, 0}, + {"Futimesat", Func, 0}, + {"GENERIC_ALL", Const, 0}, + {"GENERIC_EXECUTE", Const, 0}, + {"GENERIC_READ", Const, 0}, + {"GENERIC_WRITE", Const, 0}, + {"GUID", Type, 1}, + {"GUID.Data1", Field, 1}, + {"GUID.Data2", Field, 1}, + {"GUID.Data3", Field, 1}, + {"GUID.Data4", Field, 1}, + {"GetAcceptExSockaddrs", Func, 0}, + {"GetAdaptersInfo", Func, 0}, + {"GetAddrInfoW", Func, 1}, + {"GetCommandLine", Func, 0}, + {"GetComputerName", 
Func, 0}, + {"GetConsoleMode", Func, 1}, + {"GetCurrentDirectory", Func, 0}, + {"GetCurrentProcess", Func, 0}, + {"GetEnvironmentStrings", Func, 0}, + {"GetEnvironmentVariable", Func, 0}, + {"GetExitCodeProcess", Func, 0}, + {"GetFileAttributes", Func, 0}, + {"GetFileAttributesEx", Func, 0}, + {"GetFileExInfoStandard", Const, 0}, + {"GetFileExMaxInfoLevel", Const, 0}, + {"GetFileInformationByHandle", Func, 0}, + {"GetFileType", Func, 0}, + {"GetFullPathName", Func, 0}, + {"GetHostByName", Func, 0}, + {"GetIfEntry", Func, 0}, + {"GetLastError", Func, 0}, + {"GetLengthSid", Func, 0}, + {"GetLongPathName", Func, 0}, + {"GetProcAddress", Func, 0}, + {"GetProcessTimes", Func, 0}, + {"GetProtoByName", Func, 0}, + {"GetQueuedCompletionStatus", Func, 0}, + {"GetServByName", Func, 0}, + {"GetShortPathName", Func, 0}, + {"GetStartupInfo", Func, 0}, + {"GetStdHandle", Func, 0}, + {"GetSystemTimeAsFileTime", Func, 0}, + {"GetTempPath", Func, 0}, + {"GetTimeZoneInformation", Func, 0}, + {"GetTokenInformation", Func, 0}, + {"GetUserNameEx", Func, 0}, + {"GetUserProfileDirectory", Func, 0}, + {"GetVersion", Func, 0}, + {"Getcwd", Func, 0}, + {"Getdents", Func, 0}, + {"Getdirentries", Func, 0}, + {"Getdtablesize", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getfsstat", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpeername", Func, 0}, + {"Getpgid", Func, 0}, + {"Getpgrp", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getpriority", Func, 0}, + {"Getrlimit", Func, 0}, + {"Getrusage", Func, 0}, + {"Getsid", Func, 0}, + {"Getsockname", Func, 0}, + {"Getsockopt", Func, 1}, + {"GetsockoptByte", Func, 0}, + {"GetsockoptICMPv6Filter", Func, 2}, + {"GetsockoptIPMreq", Func, 0}, + {"GetsockoptIPMreqn", Func, 0}, + {"GetsockoptIPv6MTUInfo", Func, 2}, + {"GetsockoptIPv6Mreq", Func, 0}, + {"GetsockoptInet4Addr", Func, 0}, + {"GetsockoptInt", Func, 0}, + {"GetsockoptUcred", Func, 1}, + {"Gettid", Func, 0}, + {"Gettimeofday", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Getxattr", Func, 1}, + {"HANDLE_FLAG_INHERIT", Const, 0}, + {"HKEY_CLASSES_ROOT", Const, 0}, + {"HKEY_CURRENT_CONFIG", Const, 0}, + {"HKEY_CURRENT_USER", Const, 0}, + {"HKEY_DYN_DATA", Const, 0}, + {"HKEY_LOCAL_MACHINE", Const, 0}, + {"HKEY_PERFORMANCE_DATA", Const, 0}, + {"HKEY_USERS", Const, 0}, + {"HUPCL", Const, 0}, + {"Handle", Type, 0}, + {"Hostent", Type, 0}, + {"Hostent.AddrList", Field, 0}, + {"Hostent.AddrType", Field, 0}, + {"Hostent.Aliases", Field, 0}, + {"Hostent.Length", Field, 0}, + {"Hostent.Name", Field, 0}, + {"ICANON", Const, 0}, + {"ICMP6_FILTER", Const, 2}, + {"ICMPV6_FILTER", Const, 2}, + {"ICMPv6Filter", Type, 2}, + {"ICMPv6Filter.Data", Field, 2}, + {"ICMPv6Filter.Filt", Field, 2}, + {"ICRNL", Const, 0}, + {"IEXTEN", Const, 0}, + {"IFAN_ARRIVAL", Const, 1}, + {"IFAN_DEPARTURE", Const, 1}, + {"IFA_ADDRESS", Const, 0}, + {"IFA_ANYCAST", Const, 0}, + {"IFA_BROADCAST", Const, 0}, + {"IFA_CACHEINFO", Const, 0}, + {"IFA_F_DADFAILED", Const, 0}, + {"IFA_F_DEPRECATED", Const, 0}, + {"IFA_F_HOMEADDRESS", Const, 0}, + {"IFA_F_NODAD", Const, 0}, + {"IFA_F_OPTIMISTIC", Const, 0}, + {"IFA_F_PERMANENT", Const, 0}, + {"IFA_F_SECONDARY", Const, 0}, + {"IFA_F_TEMPORARY", Const, 0}, + {"IFA_F_TENTATIVE", Const, 0}, + {"IFA_LABEL", Const, 0}, + {"IFA_LOCAL", Const, 0}, + {"IFA_MAX", Const, 0}, + {"IFA_MULTICAST", Const, 0}, + {"IFA_ROUTE", Const, 1}, + {"IFA_UNSPEC", Const, 0}, + {"IFF_ALLMULTI", Const, 0}, + {"IFF_ALTPHYS", Const, 
0}, + {"IFF_AUTOMEDIA", Const, 0}, + {"IFF_BROADCAST", Const, 0}, + {"IFF_CANTCHANGE", Const, 0}, + {"IFF_CANTCONFIG", Const, 1}, + {"IFF_DEBUG", Const, 0}, + {"IFF_DRV_OACTIVE", Const, 0}, + {"IFF_DRV_RUNNING", Const, 0}, + {"IFF_DYING", Const, 0}, + {"IFF_DYNAMIC", Const, 0}, + {"IFF_LINK0", Const, 0}, + {"IFF_LINK1", Const, 0}, + {"IFF_LINK2", Const, 0}, + {"IFF_LOOPBACK", Const, 0}, + {"IFF_MASTER", Const, 0}, + {"IFF_MONITOR", Const, 0}, + {"IFF_MULTICAST", Const, 0}, + {"IFF_NOARP", Const, 0}, + {"IFF_NOTRAILERS", Const, 0}, + {"IFF_NO_PI", Const, 0}, + {"IFF_OACTIVE", Const, 0}, + {"IFF_ONE_QUEUE", Const, 0}, + {"IFF_POINTOPOINT", Const, 0}, + {"IFF_POINTTOPOINT", Const, 0}, + {"IFF_PORTSEL", Const, 0}, + {"IFF_PPROMISC", Const, 0}, + {"IFF_PROMISC", Const, 0}, + {"IFF_RENAMING", Const, 0}, + {"IFF_RUNNING", Const, 0}, + {"IFF_SIMPLEX", Const, 0}, + {"IFF_SLAVE", Const, 0}, + {"IFF_SMART", Const, 0}, + {"IFF_STATICARP", Const, 0}, + {"IFF_TAP", Const, 0}, + {"IFF_TUN", Const, 0}, + {"IFF_TUN_EXCL", Const, 0}, + {"IFF_UP", Const, 0}, + {"IFF_VNET_HDR", Const, 0}, + {"IFLA_ADDRESS", Const, 0}, + {"IFLA_BROADCAST", Const, 0}, + {"IFLA_COST", Const, 0}, + {"IFLA_IFALIAS", Const, 0}, + {"IFLA_IFNAME", Const, 0}, + {"IFLA_LINK", Const, 0}, + {"IFLA_LINKINFO", Const, 0}, + {"IFLA_LINKMODE", Const, 0}, + {"IFLA_MAP", Const, 0}, + {"IFLA_MASTER", Const, 0}, + {"IFLA_MAX", Const, 0}, + {"IFLA_MTU", Const, 0}, + {"IFLA_NET_NS_PID", Const, 0}, + {"IFLA_OPERSTATE", Const, 0}, + {"IFLA_PRIORITY", Const, 0}, + {"IFLA_PROTINFO", Const, 0}, + {"IFLA_QDISC", Const, 0}, + {"IFLA_STATS", Const, 0}, + {"IFLA_TXQLEN", Const, 0}, + {"IFLA_UNSPEC", Const, 0}, + {"IFLA_WEIGHT", Const, 0}, + {"IFLA_WIRELESS", Const, 0}, + {"IFNAMSIZ", Const, 0}, + {"IFT_1822", Const, 0}, + {"IFT_A12MPPSWITCH", Const, 0}, + {"IFT_AAL2", Const, 0}, + {"IFT_AAL5", Const, 0}, + {"IFT_ADSL", Const, 0}, + {"IFT_AFLANE8023", Const, 0}, + {"IFT_AFLANE8025", Const, 0}, + {"IFT_ARAP", Const, 0}, + {"IFT_ARCNET", Const, 0}, + {"IFT_ARCNETPLUS", Const, 0}, + {"IFT_ASYNC", Const, 0}, + {"IFT_ATM", Const, 0}, + {"IFT_ATMDXI", Const, 0}, + {"IFT_ATMFUNI", Const, 0}, + {"IFT_ATMIMA", Const, 0}, + {"IFT_ATMLOGICAL", Const, 0}, + {"IFT_ATMRADIO", Const, 0}, + {"IFT_ATMSUBINTERFACE", Const, 0}, + {"IFT_ATMVCIENDPT", Const, 0}, + {"IFT_ATMVIRTUAL", Const, 0}, + {"IFT_BGPPOLICYACCOUNTING", Const, 0}, + {"IFT_BLUETOOTH", Const, 1}, + {"IFT_BRIDGE", Const, 0}, + {"IFT_BSC", Const, 0}, + {"IFT_CARP", Const, 0}, + {"IFT_CCTEMUL", Const, 0}, + {"IFT_CELLULAR", Const, 0}, + {"IFT_CEPT", Const, 0}, + {"IFT_CES", Const, 0}, + {"IFT_CHANNEL", Const, 0}, + {"IFT_CNR", Const, 0}, + {"IFT_COFFEE", Const, 0}, + {"IFT_COMPOSITELINK", Const, 0}, + {"IFT_DCN", Const, 0}, + {"IFT_DIGITALPOWERLINE", Const, 0}, + {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0}, + {"IFT_DLSW", Const, 0}, + {"IFT_DOCSCABLEDOWNSTREAM", Const, 0}, + {"IFT_DOCSCABLEMACLAYER", Const, 0}, + {"IFT_DOCSCABLEUPSTREAM", Const, 0}, + {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1}, + {"IFT_DS0", Const, 0}, + {"IFT_DS0BUNDLE", Const, 0}, + {"IFT_DS1FDL", Const, 0}, + {"IFT_DS3", Const, 0}, + {"IFT_DTM", Const, 0}, + {"IFT_DUMMY", Const, 1}, + {"IFT_DVBASILN", Const, 0}, + {"IFT_DVBASIOUT", Const, 0}, + {"IFT_DVBRCCDOWNSTREAM", Const, 0}, + {"IFT_DVBRCCMACLAYER", Const, 0}, + {"IFT_DVBRCCUPSTREAM", Const, 0}, + {"IFT_ECONET", Const, 1}, + {"IFT_ENC", Const, 0}, + {"IFT_EON", Const, 0}, + {"IFT_EPLRS", Const, 0}, + {"IFT_ESCON", Const, 0}, + {"IFT_ETHER", Const, 0}, + {"IFT_FAITH", Const, 0}, + 
{"IFT_FAST", Const, 0}, + {"IFT_FASTETHER", Const, 0}, + {"IFT_FASTETHERFX", Const, 0}, + {"IFT_FDDI", Const, 0}, + {"IFT_FIBRECHANNEL", Const, 0}, + {"IFT_FRAMERELAYINTERCONNECT", Const, 0}, + {"IFT_FRAMERELAYMPI", Const, 0}, + {"IFT_FRDLCIENDPT", Const, 0}, + {"IFT_FRELAY", Const, 0}, + {"IFT_FRELAYDCE", Const, 0}, + {"IFT_FRF16MFRBUNDLE", Const, 0}, + {"IFT_FRFORWARD", Const, 0}, + {"IFT_G703AT2MB", Const, 0}, + {"IFT_G703AT64K", Const, 0}, + {"IFT_GIF", Const, 0}, + {"IFT_GIGABITETHERNET", Const, 0}, + {"IFT_GR303IDT", Const, 0}, + {"IFT_GR303RDT", Const, 0}, + {"IFT_H323GATEKEEPER", Const, 0}, + {"IFT_H323PROXY", Const, 0}, + {"IFT_HDH1822", Const, 0}, + {"IFT_HDLC", Const, 0}, + {"IFT_HDSL2", Const, 0}, + {"IFT_HIPERLAN2", Const, 0}, + {"IFT_HIPPI", Const, 0}, + {"IFT_HIPPIINTERFACE", Const, 0}, + {"IFT_HOSTPAD", Const, 0}, + {"IFT_HSSI", Const, 0}, + {"IFT_HY", Const, 0}, + {"IFT_IBM370PARCHAN", Const, 0}, + {"IFT_IDSL", Const, 0}, + {"IFT_IEEE1394", Const, 0}, + {"IFT_IEEE80211", Const, 0}, + {"IFT_IEEE80212", Const, 0}, + {"IFT_IEEE8023ADLAG", Const, 0}, + {"IFT_IFGSN", Const, 0}, + {"IFT_IMT", Const, 0}, + {"IFT_INFINIBAND", Const, 1}, + {"IFT_INTERLEAVE", Const, 0}, + {"IFT_IP", Const, 0}, + {"IFT_IPFORWARD", Const, 0}, + {"IFT_IPOVERATM", Const, 0}, + {"IFT_IPOVERCDLC", Const, 0}, + {"IFT_IPOVERCLAW", Const, 0}, + {"IFT_IPSWITCH", Const, 0}, + {"IFT_IPXIP", Const, 0}, + {"IFT_ISDN", Const, 0}, + {"IFT_ISDNBASIC", Const, 0}, + {"IFT_ISDNPRIMARY", Const, 0}, + {"IFT_ISDNS", Const, 0}, + {"IFT_ISDNU", Const, 0}, + {"IFT_ISO88022LLC", Const, 0}, + {"IFT_ISO88023", Const, 0}, + {"IFT_ISO88024", Const, 0}, + {"IFT_ISO88025", Const, 0}, + {"IFT_ISO88025CRFPINT", Const, 0}, + {"IFT_ISO88025DTR", Const, 0}, + {"IFT_ISO88025FIBER", Const, 0}, + {"IFT_ISO88026", Const, 0}, + {"IFT_ISUP", Const, 0}, + {"IFT_L2VLAN", Const, 0}, + {"IFT_L3IPVLAN", Const, 0}, + {"IFT_L3IPXVLAN", Const, 0}, + {"IFT_LAPB", Const, 0}, + {"IFT_LAPD", Const, 0}, + {"IFT_LAPF", Const, 0}, + {"IFT_LINEGROUP", Const, 1}, + {"IFT_LOCALTALK", Const, 0}, + {"IFT_LOOP", Const, 0}, + {"IFT_MEDIAMAILOVERIP", Const, 0}, + {"IFT_MFSIGLINK", Const, 0}, + {"IFT_MIOX25", Const, 0}, + {"IFT_MODEM", Const, 0}, + {"IFT_MPC", Const, 0}, + {"IFT_MPLS", Const, 0}, + {"IFT_MPLSTUNNEL", Const, 0}, + {"IFT_MSDSL", Const, 0}, + {"IFT_MVL", Const, 0}, + {"IFT_MYRINET", Const, 0}, + {"IFT_NFAS", Const, 0}, + {"IFT_NSIP", Const, 0}, + {"IFT_OPTICALCHANNEL", Const, 0}, + {"IFT_OPTICALTRANSPORT", Const, 0}, + {"IFT_OTHER", Const, 0}, + {"IFT_P10", Const, 0}, + {"IFT_P80", Const, 0}, + {"IFT_PARA", Const, 0}, + {"IFT_PDP", Const, 0}, + {"IFT_PFLOG", Const, 0}, + {"IFT_PFLOW", Const, 1}, + {"IFT_PFSYNC", Const, 0}, + {"IFT_PLC", Const, 0}, + {"IFT_PON155", Const, 1}, + {"IFT_PON622", Const, 1}, + {"IFT_POS", Const, 0}, + {"IFT_PPP", Const, 0}, + {"IFT_PPPMULTILINKBUNDLE", Const, 0}, + {"IFT_PROPATM", Const, 1}, + {"IFT_PROPBWAP2MP", Const, 0}, + {"IFT_PROPCNLS", Const, 0}, + {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0}, + {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0}, + {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0}, + {"IFT_PROPMUX", Const, 0}, + {"IFT_PROPVIRTUAL", Const, 0}, + {"IFT_PROPWIRELESSP2P", Const, 0}, + {"IFT_PTPSERIAL", Const, 0}, + {"IFT_PVC", Const, 0}, + {"IFT_Q2931", Const, 1}, + {"IFT_QLLC", Const, 0}, + {"IFT_RADIOMAC", Const, 0}, + {"IFT_RADSL", Const, 0}, + {"IFT_REACHDSL", Const, 0}, + {"IFT_RFC1483", Const, 0}, + {"IFT_RS232", Const, 0}, + {"IFT_RSRB", Const, 0}, + {"IFT_SDLC", Const, 0}, + {"IFT_SDSL", Const, 0}, + 
{"IFT_SHDSL", Const, 0}, + {"IFT_SIP", Const, 0}, + {"IFT_SIPSIG", Const, 1}, + {"IFT_SIPTG", Const, 1}, + {"IFT_SLIP", Const, 0}, + {"IFT_SMDSDXI", Const, 0}, + {"IFT_SMDSICIP", Const, 0}, + {"IFT_SONET", Const, 0}, + {"IFT_SONETOVERHEADCHANNEL", Const, 0}, + {"IFT_SONETPATH", Const, 0}, + {"IFT_SONETVT", Const, 0}, + {"IFT_SRP", Const, 0}, + {"IFT_SS7SIGLINK", Const, 0}, + {"IFT_STACKTOSTACK", Const, 0}, + {"IFT_STARLAN", Const, 0}, + {"IFT_STF", Const, 0}, + {"IFT_T1", Const, 0}, + {"IFT_TDLC", Const, 0}, + {"IFT_TELINK", Const, 1}, + {"IFT_TERMPAD", Const, 0}, + {"IFT_TR008", Const, 0}, + {"IFT_TRANSPHDLC", Const, 0}, + {"IFT_TUNNEL", Const, 0}, + {"IFT_ULTRA", Const, 0}, + {"IFT_USB", Const, 0}, + {"IFT_V11", Const, 0}, + {"IFT_V35", Const, 0}, + {"IFT_V36", Const, 0}, + {"IFT_V37", Const, 0}, + {"IFT_VDSL", Const, 0}, + {"IFT_VIRTUALIPADDRESS", Const, 0}, + {"IFT_VIRTUALTG", Const, 1}, + {"IFT_VOICEDID", Const, 1}, + {"IFT_VOICEEM", Const, 0}, + {"IFT_VOICEEMFGD", Const, 1}, + {"IFT_VOICEENCAP", Const, 0}, + {"IFT_VOICEFGDEANA", Const, 1}, + {"IFT_VOICEFXO", Const, 0}, + {"IFT_VOICEFXS", Const, 0}, + {"IFT_VOICEOVERATM", Const, 0}, + {"IFT_VOICEOVERCABLE", Const, 1}, + {"IFT_VOICEOVERFRAMERELAY", Const, 0}, + {"IFT_VOICEOVERIP", Const, 0}, + {"IFT_X213", Const, 0}, + {"IFT_X25", Const, 0}, + {"IFT_X25DDN", Const, 0}, + {"IFT_X25HUNTGROUP", Const, 0}, + {"IFT_X25MLP", Const, 0}, + {"IFT_X25PLE", Const, 0}, + {"IFT_XETHER", Const, 0}, + {"IGNBRK", Const, 0}, + {"IGNCR", Const, 0}, + {"IGNORE", Const, 0}, + {"IGNPAR", Const, 0}, + {"IMAXBEL", Const, 0}, + {"INFINITE", Const, 0}, + {"INLCR", Const, 0}, + {"INPCK", Const, 0}, + {"INVALID_FILE_ATTRIBUTES", Const, 0}, + {"IN_ACCESS", Const, 0}, + {"IN_ALL_EVENTS", Const, 0}, + {"IN_ATTRIB", Const, 0}, + {"IN_CLASSA_HOST", Const, 0}, + {"IN_CLASSA_MAX", Const, 0}, + {"IN_CLASSA_NET", Const, 0}, + {"IN_CLASSA_NSHIFT", Const, 0}, + {"IN_CLASSB_HOST", Const, 0}, + {"IN_CLASSB_MAX", Const, 0}, + {"IN_CLASSB_NET", Const, 0}, + {"IN_CLASSB_NSHIFT", Const, 0}, + {"IN_CLASSC_HOST", Const, 0}, + {"IN_CLASSC_NET", Const, 0}, + {"IN_CLASSC_NSHIFT", Const, 0}, + {"IN_CLASSD_HOST", Const, 0}, + {"IN_CLASSD_NET", Const, 0}, + {"IN_CLASSD_NSHIFT", Const, 0}, + {"IN_CLOEXEC", Const, 0}, + {"IN_CLOSE", Const, 0}, + {"IN_CLOSE_NOWRITE", Const, 0}, + {"IN_CLOSE_WRITE", Const, 0}, + {"IN_CREATE", Const, 0}, + {"IN_DELETE", Const, 0}, + {"IN_DELETE_SELF", Const, 0}, + {"IN_DONT_FOLLOW", Const, 0}, + {"IN_EXCL_UNLINK", Const, 0}, + {"IN_IGNORED", Const, 0}, + {"IN_ISDIR", Const, 0}, + {"IN_LINKLOCALNETNUM", Const, 0}, + {"IN_LOOPBACKNET", Const, 0}, + {"IN_MASK_ADD", Const, 0}, + {"IN_MODIFY", Const, 0}, + {"IN_MOVE", Const, 0}, + {"IN_MOVED_FROM", Const, 0}, + {"IN_MOVED_TO", Const, 0}, + {"IN_MOVE_SELF", Const, 0}, + {"IN_NONBLOCK", Const, 0}, + {"IN_ONESHOT", Const, 0}, + {"IN_ONLYDIR", Const, 0}, + {"IN_OPEN", Const, 0}, + {"IN_Q_OVERFLOW", Const, 0}, + {"IN_RFC3021_HOST", Const, 1}, + {"IN_RFC3021_MASK", Const, 1}, + {"IN_RFC3021_NET", Const, 1}, + {"IN_RFC3021_NSHIFT", Const, 1}, + {"IN_UNMOUNT", Const, 0}, + {"IOC_IN", Const, 1}, + {"IOC_INOUT", Const, 1}, + {"IOC_OUT", Const, 1}, + {"IOC_VENDOR", Const, 3}, + {"IOC_WS2", Const, 1}, + {"IO_REPARSE_TAG_SYMLINK", Const, 4}, + {"IPMreq", Type, 0}, + {"IPMreq.Interface", Field, 0}, + {"IPMreq.Multiaddr", Field, 0}, + {"IPMreqn", Type, 0}, + {"IPMreqn.Address", Field, 0}, + {"IPMreqn.Ifindex", Field, 0}, + {"IPMreqn.Multiaddr", Field, 0}, + {"IPPROTO_3PC", Const, 0}, + {"IPPROTO_ADFS", Const, 0}, + 
{"IPPROTO_AH", Const, 0}, + {"IPPROTO_AHIP", Const, 0}, + {"IPPROTO_APES", Const, 0}, + {"IPPROTO_ARGUS", Const, 0}, + {"IPPROTO_AX25", Const, 0}, + {"IPPROTO_BHA", Const, 0}, + {"IPPROTO_BLT", Const, 0}, + {"IPPROTO_BRSATMON", Const, 0}, + {"IPPROTO_CARP", Const, 0}, + {"IPPROTO_CFTP", Const, 0}, + {"IPPROTO_CHAOS", Const, 0}, + {"IPPROTO_CMTP", Const, 0}, + {"IPPROTO_COMP", Const, 0}, + {"IPPROTO_CPHB", Const, 0}, + {"IPPROTO_CPNX", Const, 0}, + {"IPPROTO_DCCP", Const, 0}, + {"IPPROTO_DDP", Const, 0}, + {"IPPROTO_DGP", Const, 0}, + {"IPPROTO_DIVERT", Const, 0}, + {"IPPROTO_DIVERT_INIT", Const, 3}, + {"IPPROTO_DIVERT_RESP", Const, 3}, + {"IPPROTO_DONE", Const, 0}, + {"IPPROTO_DSTOPTS", Const, 0}, + {"IPPROTO_EGP", Const, 0}, + {"IPPROTO_EMCON", Const, 0}, + {"IPPROTO_ENCAP", Const, 0}, + {"IPPROTO_EON", Const, 0}, + {"IPPROTO_ESP", Const, 0}, + {"IPPROTO_ETHERIP", Const, 0}, + {"IPPROTO_FRAGMENT", Const, 0}, + {"IPPROTO_GGP", Const, 0}, + {"IPPROTO_GMTP", Const, 0}, + {"IPPROTO_GRE", Const, 0}, + {"IPPROTO_HELLO", Const, 0}, + {"IPPROTO_HMP", Const, 0}, + {"IPPROTO_HOPOPTS", Const, 0}, + {"IPPROTO_ICMP", Const, 0}, + {"IPPROTO_ICMPV6", Const, 0}, + {"IPPROTO_IDP", Const, 0}, + {"IPPROTO_IDPR", Const, 0}, + {"IPPROTO_IDRP", Const, 0}, + {"IPPROTO_IGMP", Const, 0}, + {"IPPROTO_IGP", Const, 0}, + {"IPPROTO_IGRP", Const, 0}, + {"IPPROTO_IL", Const, 0}, + {"IPPROTO_INLSP", Const, 0}, + {"IPPROTO_INP", Const, 0}, + {"IPPROTO_IP", Const, 0}, + {"IPPROTO_IPCOMP", Const, 0}, + {"IPPROTO_IPCV", Const, 0}, + {"IPPROTO_IPEIP", Const, 0}, + {"IPPROTO_IPIP", Const, 0}, + {"IPPROTO_IPPC", Const, 0}, + {"IPPROTO_IPV4", Const, 0}, + {"IPPROTO_IPV6", Const, 0}, + {"IPPROTO_IPV6_ICMP", Const, 1}, + {"IPPROTO_IRTP", Const, 0}, + {"IPPROTO_KRYPTOLAN", Const, 0}, + {"IPPROTO_LARP", Const, 0}, + {"IPPROTO_LEAF1", Const, 0}, + {"IPPROTO_LEAF2", Const, 0}, + {"IPPROTO_MAX", Const, 0}, + {"IPPROTO_MAXID", Const, 0}, + {"IPPROTO_MEAS", Const, 0}, + {"IPPROTO_MH", Const, 1}, + {"IPPROTO_MHRP", Const, 0}, + {"IPPROTO_MICP", Const, 0}, + {"IPPROTO_MOBILE", Const, 0}, + {"IPPROTO_MPLS", Const, 1}, + {"IPPROTO_MTP", Const, 0}, + {"IPPROTO_MUX", Const, 0}, + {"IPPROTO_ND", Const, 0}, + {"IPPROTO_NHRP", Const, 0}, + {"IPPROTO_NONE", Const, 0}, + {"IPPROTO_NSP", Const, 0}, + {"IPPROTO_NVPII", Const, 0}, + {"IPPROTO_OLD_DIVERT", Const, 0}, + {"IPPROTO_OSPFIGP", Const, 0}, + {"IPPROTO_PFSYNC", Const, 0}, + {"IPPROTO_PGM", Const, 0}, + {"IPPROTO_PIGP", Const, 0}, + {"IPPROTO_PIM", Const, 0}, + {"IPPROTO_PRM", Const, 0}, + {"IPPROTO_PUP", Const, 0}, + {"IPPROTO_PVP", Const, 0}, + {"IPPROTO_RAW", Const, 0}, + {"IPPROTO_RCCMON", Const, 0}, + {"IPPROTO_RDP", Const, 0}, + {"IPPROTO_ROUTING", Const, 0}, + {"IPPROTO_RSVP", Const, 0}, + {"IPPROTO_RVD", Const, 0}, + {"IPPROTO_SATEXPAK", Const, 0}, + {"IPPROTO_SATMON", Const, 0}, + {"IPPROTO_SCCSP", Const, 0}, + {"IPPROTO_SCTP", Const, 0}, + {"IPPROTO_SDRP", Const, 0}, + {"IPPROTO_SEND", Const, 1}, + {"IPPROTO_SEP", Const, 0}, + {"IPPROTO_SKIP", Const, 0}, + {"IPPROTO_SPACER", Const, 0}, + {"IPPROTO_SRPC", Const, 0}, + {"IPPROTO_ST", Const, 0}, + {"IPPROTO_SVMTP", Const, 0}, + {"IPPROTO_SWIPE", Const, 0}, + {"IPPROTO_TCF", Const, 0}, + {"IPPROTO_TCP", Const, 0}, + {"IPPROTO_TLSP", Const, 0}, + {"IPPROTO_TP", Const, 0}, + {"IPPROTO_TPXX", Const, 0}, + {"IPPROTO_TRUNK1", Const, 0}, + {"IPPROTO_TRUNK2", Const, 0}, + {"IPPROTO_TTP", Const, 0}, + {"IPPROTO_UDP", Const, 0}, + {"IPPROTO_UDPLITE", Const, 0}, + {"IPPROTO_VINES", Const, 0}, + {"IPPROTO_VISA", Const, 0}, + {"IPPROTO_VMTP", Const, 
0}, + {"IPPROTO_VRRP", Const, 1}, + {"IPPROTO_WBEXPAK", Const, 0}, + {"IPPROTO_WBMON", Const, 0}, + {"IPPROTO_WSN", Const, 0}, + {"IPPROTO_XNET", Const, 0}, + {"IPPROTO_XTP", Const, 0}, + {"IPV6_2292DSTOPTS", Const, 0}, + {"IPV6_2292HOPLIMIT", Const, 0}, + {"IPV6_2292HOPOPTS", Const, 0}, + {"IPV6_2292NEXTHOP", Const, 0}, + {"IPV6_2292PKTINFO", Const, 0}, + {"IPV6_2292PKTOPTIONS", Const, 0}, + {"IPV6_2292RTHDR", Const, 0}, + {"IPV6_ADDRFORM", Const, 0}, + {"IPV6_ADD_MEMBERSHIP", Const, 0}, + {"IPV6_AUTHHDR", Const, 0}, + {"IPV6_AUTH_LEVEL", Const, 1}, + {"IPV6_AUTOFLOWLABEL", Const, 0}, + {"IPV6_BINDANY", Const, 0}, + {"IPV6_BINDV6ONLY", Const, 0}, + {"IPV6_BOUND_IF", Const, 0}, + {"IPV6_CHECKSUM", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IPV6_DEFHLIM", Const, 0}, + {"IPV6_DONTFRAG", Const, 0}, + {"IPV6_DROP_MEMBERSHIP", Const, 0}, + {"IPV6_DSTOPTS", Const, 0}, + {"IPV6_ESP_NETWORK_LEVEL", Const, 1}, + {"IPV6_ESP_TRANS_LEVEL", Const, 1}, + {"IPV6_FAITH", Const, 0}, + {"IPV6_FLOWINFO_MASK", Const, 0}, + {"IPV6_FLOWLABEL_MASK", Const, 0}, + {"IPV6_FRAGTTL", Const, 0}, + {"IPV6_FW_ADD", Const, 0}, + {"IPV6_FW_DEL", Const, 0}, + {"IPV6_FW_FLUSH", Const, 0}, + {"IPV6_FW_GET", Const, 0}, + {"IPV6_FW_ZERO", Const, 0}, + {"IPV6_HLIMDEC", Const, 0}, + {"IPV6_HOPLIMIT", Const, 0}, + {"IPV6_HOPOPTS", Const, 0}, + {"IPV6_IPCOMP_LEVEL", Const, 1}, + {"IPV6_IPSEC_POLICY", Const, 0}, + {"IPV6_JOIN_ANYCAST", Const, 0}, + {"IPV6_JOIN_GROUP", Const, 0}, + {"IPV6_LEAVE_ANYCAST", Const, 0}, + {"IPV6_LEAVE_GROUP", Const, 0}, + {"IPV6_MAXHLIM", Const, 0}, + {"IPV6_MAXOPTHDR", Const, 0}, + {"IPV6_MAXPACKET", Const, 0}, + {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IPV6_MAX_MEMBERSHIPS", Const, 0}, + {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IPV6_MIN_MEMBERSHIPS", Const, 0}, + {"IPV6_MMTU", Const, 0}, + {"IPV6_MSFILTER", Const, 0}, + {"IPV6_MTU", Const, 0}, + {"IPV6_MTU_DISCOVER", Const, 0}, + {"IPV6_MULTICAST_HOPS", Const, 0}, + {"IPV6_MULTICAST_IF", Const, 0}, + {"IPV6_MULTICAST_LOOP", Const, 0}, + {"IPV6_NEXTHOP", Const, 0}, + {"IPV6_OPTIONS", Const, 1}, + {"IPV6_PATHMTU", Const, 0}, + {"IPV6_PIPEX", Const, 1}, + {"IPV6_PKTINFO", Const, 0}, + {"IPV6_PMTUDISC_DO", Const, 0}, + {"IPV6_PMTUDISC_DONT", Const, 0}, + {"IPV6_PMTUDISC_PROBE", Const, 0}, + {"IPV6_PMTUDISC_WANT", Const, 0}, + {"IPV6_PORTRANGE", Const, 0}, + {"IPV6_PORTRANGE_DEFAULT", Const, 0}, + {"IPV6_PORTRANGE_HIGH", Const, 0}, + {"IPV6_PORTRANGE_LOW", Const, 0}, + {"IPV6_PREFER_TEMPADDR", Const, 0}, + {"IPV6_RECVDSTOPTS", Const, 0}, + {"IPV6_RECVDSTPORT", Const, 3}, + {"IPV6_RECVERR", Const, 0}, + {"IPV6_RECVHOPLIMIT", Const, 0}, + {"IPV6_RECVHOPOPTS", Const, 0}, + {"IPV6_RECVPATHMTU", Const, 0}, + {"IPV6_RECVPKTINFO", Const, 0}, + {"IPV6_RECVRTHDR", Const, 0}, + {"IPV6_RECVTCLASS", Const, 0}, + {"IPV6_ROUTER_ALERT", Const, 0}, + {"IPV6_RTABLE", Const, 1}, + {"IPV6_RTHDR", Const, 0}, + {"IPV6_RTHDRDSTOPTS", Const, 0}, + {"IPV6_RTHDR_LOOSE", Const, 0}, + {"IPV6_RTHDR_STRICT", Const, 0}, + {"IPV6_RTHDR_TYPE_0", Const, 0}, + {"IPV6_RXDSTOPTS", Const, 0}, + {"IPV6_RXHOPOPTS", Const, 0}, + {"IPV6_SOCKOPT_RESERVED1", Const, 0}, + {"IPV6_TCLASS", Const, 0}, + {"IPV6_UNICAST_HOPS", Const, 0}, + {"IPV6_USE_MIN_MTU", Const, 0}, + {"IPV6_V6ONLY", Const, 0}, + {"IPV6_VERSION", Const, 0}, + {"IPV6_VERSION_MASK", Const, 0}, + {"IPV6_XFRM_POLICY", Const, 0}, + {"IP_ADD_MEMBERSHIP", Const, 0}, + {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_AUTH_LEVEL", Const, 1}, + {"IP_BINDANY", Const, 0}, + 
{"IP_BLOCK_SOURCE", Const, 0}, + {"IP_BOUND_IF", Const, 0}, + {"IP_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IP_DEFAULT_MULTICAST_TTL", Const, 0}, + {"IP_DF", Const, 0}, + {"IP_DIVERTFL", Const, 3}, + {"IP_DONTFRAG", Const, 0}, + {"IP_DROP_MEMBERSHIP", Const, 0}, + {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_DUMMYNET3", Const, 0}, + {"IP_DUMMYNET_CONFIGURE", Const, 0}, + {"IP_DUMMYNET_DEL", Const, 0}, + {"IP_DUMMYNET_FLUSH", Const, 0}, + {"IP_DUMMYNET_GET", Const, 0}, + {"IP_EF", Const, 1}, + {"IP_ERRORMTU", Const, 1}, + {"IP_ESP_NETWORK_LEVEL", Const, 1}, + {"IP_ESP_TRANS_LEVEL", Const, 1}, + {"IP_FAITH", Const, 0}, + {"IP_FREEBIND", Const, 0}, + {"IP_FW3", Const, 0}, + {"IP_FW_ADD", Const, 0}, + {"IP_FW_DEL", Const, 0}, + {"IP_FW_FLUSH", Const, 0}, + {"IP_FW_GET", Const, 0}, + {"IP_FW_NAT_CFG", Const, 0}, + {"IP_FW_NAT_DEL", Const, 0}, + {"IP_FW_NAT_GET_CONFIG", Const, 0}, + {"IP_FW_NAT_GET_LOG", Const, 0}, + {"IP_FW_RESETLOG", Const, 0}, + {"IP_FW_TABLE_ADD", Const, 0}, + {"IP_FW_TABLE_DEL", Const, 0}, + {"IP_FW_TABLE_FLUSH", Const, 0}, + {"IP_FW_TABLE_GETSIZE", Const, 0}, + {"IP_FW_TABLE_LIST", Const, 0}, + {"IP_FW_ZERO", Const, 0}, + {"IP_HDRINCL", Const, 0}, + {"IP_IPCOMP_LEVEL", Const, 1}, + {"IP_IPSECFLOWINFO", Const, 1}, + {"IP_IPSEC_LOCAL_AUTH", Const, 1}, + {"IP_IPSEC_LOCAL_CRED", Const, 1}, + {"IP_IPSEC_LOCAL_ID", Const, 1}, + {"IP_IPSEC_POLICY", Const, 0}, + {"IP_IPSEC_REMOTE_AUTH", Const, 1}, + {"IP_IPSEC_REMOTE_CRED", Const, 1}, + {"IP_IPSEC_REMOTE_ID", Const, 1}, + {"IP_MAXPACKET", Const, 0}, + {"IP_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IP_MAX_MEMBERSHIPS", Const, 0}, + {"IP_MAX_SOCK_MUTE_FILTER", Const, 0}, + {"IP_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IP_MAX_SOURCE_FILTER", Const, 0}, + {"IP_MF", Const, 0}, + {"IP_MINFRAGSIZE", Const, 1}, + {"IP_MINTTL", Const, 0}, + {"IP_MIN_MEMBERSHIPS", Const, 0}, + {"IP_MSFILTER", Const, 0}, + {"IP_MSS", Const, 0}, + {"IP_MTU", Const, 0}, + {"IP_MTU_DISCOVER", Const, 0}, + {"IP_MULTICAST_IF", Const, 0}, + {"IP_MULTICAST_IFINDEX", Const, 0}, + {"IP_MULTICAST_LOOP", Const, 0}, + {"IP_MULTICAST_TTL", Const, 0}, + {"IP_MULTICAST_VIF", Const, 0}, + {"IP_NAT__XXX", Const, 0}, + {"IP_OFFMASK", Const, 0}, + {"IP_OLD_FW_ADD", Const, 0}, + {"IP_OLD_FW_DEL", Const, 0}, + {"IP_OLD_FW_FLUSH", Const, 0}, + {"IP_OLD_FW_GET", Const, 0}, + {"IP_OLD_FW_RESETLOG", Const, 0}, + {"IP_OLD_FW_ZERO", Const, 0}, + {"IP_ONESBCAST", Const, 0}, + {"IP_OPTIONS", Const, 0}, + {"IP_ORIGDSTADDR", Const, 0}, + {"IP_PASSSEC", Const, 0}, + {"IP_PIPEX", Const, 1}, + {"IP_PKTINFO", Const, 0}, + {"IP_PKTOPTIONS", Const, 0}, + {"IP_PMTUDISC", Const, 0}, + {"IP_PMTUDISC_DO", Const, 0}, + {"IP_PMTUDISC_DONT", Const, 0}, + {"IP_PMTUDISC_PROBE", Const, 0}, + {"IP_PMTUDISC_WANT", Const, 0}, + {"IP_PORTRANGE", Const, 0}, + {"IP_PORTRANGE_DEFAULT", Const, 0}, + {"IP_PORTRANGE_HIGH", Const, 0}, + {"IP_PORTRANGE_LOW", Const, 0}, + {"IP_RECVDSTADDR", Const, 0}, + {"IP_RECVDSTPORT", Const, 1}, + {"IP_RECVERR", Const, 0}, + {"IP_RECVIF", Const, 0}, + {"IP_RECVOPTS", Const, 0}, + {"IP_RECVORIGDSTADDR", Const, 0}, + {"IP_RECVPKTINFO", Const, 0}, + {"IP_RECVRETOPTS", Const, 0}, + {"IP_RECVRTABLE", Const, 1}, + {"IP_RECVTOS", Const, 0}, + {"IP_RECVTTL", Const, 0}, + {"IP_RETOPTS", Const, 0}, + {"IP_RF", Const, 0}, + {"IP_ROUTER_ALERT", Const, 0}, + {"IP_RSVP_OFF", Const, 0}, + {"IP_RSVP_ON", Const, 0}, + {"IP_RSVP_VIF_OFF", Const, 0}, + {"IP_RSVP_VIF_ON", Const, 0}, + {"IP_RTABLE", Const, 1}, + {"IP_SENDSRCADDR", Const, 0}, + {"IP_STRIPHDR", Const, 0}, + {"IP_TOS", Const, 0}, + 
{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0}, + {"IP_TRANSPARENT", Const, 0}, + {"IP_TTL", Const, 0}, + {"IP_UNBLOCK_SOURCE", Const, 0}, + {"IP_XFRM_POLICY", Const, 0}, + {"IPv6MTUInfo", Type, 2}, + {"IPv6MTUInfo.Addr", Field, 2}, + {"IPv6MTUInfo.Mtu", Field, 2}, + {"IPv6Mreq", Type, 0}, + {"IPv6Mreq.Interface", Field, 0}, + {"IPv6Mreq.Multiaddr", Field, 0}, + {"ISIG", Const, 0}, + {"ISTRIP", Const, 0}, + {"IUCLC", Const, 0}, + {"IUTF8", Const, 0}, + {"IXANY", Const, 0}, + {"IXOFF", Const, 0}, + {"IXON", Const, 0}, + {"IfAddrmsg", Type, 0}, + {"IfAddrmsg.Family", Field, 0}, + {"IfAddrmsg.Flags", Field, 0}, + {"IfAddrmsg.Index", Field, 0}, + {"IfAddrmsg.Prefixlen", Field, 0}, + {"IfAddrmsg.Scope", Field, 0}, + {"IfAnnounceMsghdr", Type, 1}, + {"IfAnnounceMsghdr.Hdrlen", Field, 2}, + {"IfAnnounceMsghdr.Index", Field, 1}, + {"IfAnnounceMsghdr.Msglen", Field, 1}, + {"IfAnnounceMsghdr.Name", Field, 1}, + {"IfAnnounceMsghdr.Type", Field, 1}, + {"IfAnnounceMsghdr.Version", Field, 1}, + {"IfAnnounceMsghdr.What", Field, 1}, + {"IfData", Type, 0}, + {"IfData.Addrlen", Field, 0}, + {"IfData.Baudrate", Field, 0}, + {"IfData.Capabilities", Field, 2}, + {"IfData.Collisions", Field, 0}, + {"IfData.Datalen", Field, 0}, + {"IfData.Epoch", Field, 0}, + {"IfData.Hdrlen", Field, 0}, + {"IfData.Hwassist", Field, 0}, + {"IfData.Ibytes", Field, 0}, + {"IfData.Ierrors", Field, 0}, + {"IfData.Imcasts", Field, 0}, + {"IfData.Ipackets", Field, 0}, + {"IfData.Iqdrops", Field, 0}, + {"IfData.Lastchange", Field, 0}, + {"IfData.Link_state", Field, 0}, + {"IfData.Mclpool", Field, 2}, + {"IfData.Metric", Field, 0}, + {"IfData.Mtu", Field, 0}, + {"IfData.Noproto", Field, 0}, + {"IfData.Obytes", Field, 0}, + {"IfData.Oerrors", Field, 0}, + {"IfData.Omcasts", Field, 0}, + {"IfData.Opackets", Field, 0}, + {"IfData.Pad", Field, 2}, + {"IfData.Pad_cgo_0", Field, 2}, + {"IfData.Pad_cgo_1", Field, 2}, + {"IfData.Physical", Field, 0}, + {"IfData.Recvquota", Field, 0}, + {"IfData.Recvtiming", Field, 0}, + {"IfData.Reserved1", Field, 0}, + {"IfData.Reserved2", Field, 0}, + {"IfData.Spare_char1", Field, 0}, + {"IfData.Spare_char2", Field, 0}, + {"IfData.Type", Field, 0}, + {"IfData.Typelen", Field, 0}, + {"IfData.Unused1", Field, 0}, + {"IfData.Unused2", Field, 0}, + {"IfData.Xmitquota", Field, 0}, + {"IfData.Xmittiming", Field, 0}, + {"IfInfomsg", Type, 0}, + {"IfInfomsg.Change", Field, 0}, + {"IfInfomsg.Family", Field, 0}, + {"IfInfomsg.Flags", Field, 0}, + {"IfInfomsg.Index", Field, 0}, + {"IfInfomsg.Type", Field, 0}, + {"IfInfomsg.X__ifi_pad", Field, 0}, + {"IfMsghdr", Type, 0}, + {"IfMsghdr.Addrs", Field, 0}, + {"IfMsghdr.Data", Field, 0}, + {"IfMsghdr.Flags", Field, 0}, + {"IfMsghdr.Hdrlen", Field, 2}, + {"IfMsghdr.Index", Field, 0}, + {"IfMsghdr.Msglen", Field, 0}, + {"IfMsghdr.Pad1", Field, 2}, + {"IfMsghdr.Pad2", Field, 2}, + {"IfMsghdr.Pad_cgo_0", Field, 0}, + {"IfMsghdr.Pad_cgo_1", Field, 2}, + {"IfMsghdr.Tableid", Field, 2}, + {"IfMsghdr.Type", Field, 0}, + {"IfMsghdr.Version", Field, 0}, + {"IfMsghdr.Xflags", Field, 2}, + {"IfaMsghdr", Type, 0}, + {"IfaMsghdr.Addrs", Field, 0}, + {"IfaMsghdr.Flags", Field, 0}, + {"IfaMsghdr.Hdrlen", Field, 2}, + {"IfaMsghdr.Index", Field, 0}, + {"IfaMsghdr.Metric", Field, 0}, + {"IfaMsghdr.Msglen", Field, 0}, + {"IfaMsghdr.Pad1", Field, 2}, + {"IfaMsghdr.Pad2", Field, 2}, + {"IfaMsghdr.Pad_cgo_0", Field, 0}, + {"IfaMsghdr.Tableid", Field, 2}, + {"IfaMsghdr.Type", Field, 0}, + {"IfaMsghdr.Version", Field, 0}, + {"IfmaMsghdr", Type, 0}, + {"IfmaMsghdr.Addrs", Field, 0}, + {"IfmaMsghdr.Flags", 
Field, 0}, + {"IfmaMsghdr.Index", Field, 0}, + {"IfmaMsghdr.Msglen", Field, 0}, + {"IfmaMsghdr.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr.Type", Field, 0}, + {"IfmaMsghdr.Version", Field, 0}, + {"IfmaMsghdr2", Type, 0}, + {"IfmaMsghdr2.Addrs", Field, 0}, + {"IfmaMsghdr2.Flags", Field, 0}, + {"IfmaMsghdr2.Index", Field, 0}, + {"IfmaMsghdr2.Msglen", Field, 0}, + {"IfmaMsghdr2.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr2.Refcount", Field, 0}, + {"IfmaMsghdr2.Type", Field, 0}, + {"IfmaMsghdr2.Version", Field, 0}, + {"ImplementsGetwd", Const, 0}, + {"Inet4Pktinfo", Type, 0}, + {"Inet4Pktinfo.Addr", Field, 0}, + {"Inet4Pktinfo.Ifindex", Field, 0}, + {"Inet4Pktinfo.Spec_dst", Field, 0}, + {"Inet6Pktinfo", Type, 0}, + {"Inet6Pktinfo.Addr", Field, 0}, + {"Inet6Pktinfo.Ifindex", Field, 0}, + {"InotifyAddWatch", Func, 0}, + {"InotifyEvent", Type, 0}, + {"InotifyEvent.Cookie", Field, 0}, + {"InotifyEvent.Len", Field, 0}, + {"InotifyEvent.Mask", Field, 0}, + {"InotifyEvent.Name", Field, 0}, + {"InotifyEvent.Wd", Field, 0}, + {"InotifyInit", Func, 0}, + {"InotifyInit1", Func, 0}, + {"InotifyRmWatch", Func, 0}, + {"InterfaceAddrMessage", Type, 0}, + {"InterfaceAddrMessage.Data", Field, 0}, + {"InterfaceAddrMessage.Header", Field, 0}, + {"InterfaceAnnounceMessage", Type, 1}, + {"InterfaceAnnounceMessage.Header", Field, 1}, + {"InterfaceInfo", Type, 0}, + {"InterfaceInfo.Address", Field, 0}, + {"InterfaceInfo.BroadcastAddress", Field, 0}, + {"InterfaceInfo.Flags", Field, 0}, + {"InterfaceInfo.Netmask", Field, 0}, + {"InterfaceMessage", Type, 0}, + {"InterfaceMessage.Data", Field, 0}, + {"InterfaceMessage.Header", Field, 0}, + {"InterfaceMulticastAddrMessage", Type, 0}, + {"InterfaceMulticastAddrMessage.Data", Field, 0}, + {"InterfaceMulticastAddrMessage.Header", Field, 0}, + {"InvalidHandle", Const, 0}, + {"Ioperm", Func, 0}, + {"Iopl", Func, 0}, + {"Iovec", Type, 0}, + {"Iovec.Base", Field, 0}, + {"Iovec.Len", Field, 0}, + {"IpAdapterInfo", Type, 0}, + {"IpAdapterInfo.AdapterName", Field, 0}, + {"IpAdapterInfo.Address", Field, 0}, + {"IpAdapterInfo.AddressLength", Field, 0}, + {"IpAdapterInfo.ComboIndex", Field, 0}, + {"IpAdapterInfo.CurrentIpAddress", Field, 0}, + {"IpAdapterInfo.Description", Field, 0}, + {"IpAdapterInfo.DhcpEnabled", Field, 0}, + {"IpAdapterInfo.DhcpServer", Field, 0}, + {"IpAdapterInfo.GatewayList", Field, 0}, + {"IpAdapterInfo.HaveWins", Field, 0}, + {"IpAdapterInfo.Index", Field, 0}, + {"IpAdapterInfo.IpAddressList", Field, 0}, + {"IpAdapterInfo.LeaseExpires", Field, 0}, + {"IpAdapterInfo.LeaseObtained", Field, 0}, + {"IpAdapterInfo.Next", Field, 0}, + {"IpAdapterInfo.PrimaryWinsServer", Field, 0}, + {"IpAdapterInfo.SecondaryWinsServer", Field, 0}, + {"IpAdapterInfo.Type", Field, 0}, + {"IpAddrString", Type, 0}, + {"IpAddrString.Context", Field, 0}, + {"IpAddrString.IpAddress", Field, 0}, + {"IpAddrString.IpMask", Field, 0}, + {"IpAddrString.Next", Field, 0}, + {"IpAddressString", Type, 0}, + {"IpAddressString.String", Field, 0}, + {"IpMaskString", Type, 0}, + {"IpMaskString.String", Field, 2}, + {"Issetugid", Func, 0}, + {"KEY_ALL_ACCESS", Const, 0}, + {"KEY_CREATE_LINK", Const, 0}, + {"KEY_CREATE_SUB_KEY", Const, 0}, + {"KEY_ENUMERATE_SUB_KEYS", Const, 0}, + {"KEY_EXECUTE", Const, 0}, + {"KEY_NOTIFY", Const, 0}, + {"KEY_QUERY_VALUE", Const, 0}, + {"KEY_READ", Const, 0}, + {"KEY_SET_VALUE", Const, 0}, + {"KEY_WOW64_32KEY", Const, 0}, + {"KEY_WOW64_64KEY", Const, 0}, + {"KEY_WRITE", Const, 0}, + {"Kevent", Func, 0}, + {"Kevent_t", Type, 0}, + {"Kevent_t.Data", Field, 0}, + {"Kevent_t.Fflags", 
Field, 0}, + {"Kevent_t.Filter", Field, 0}, + {"Kevent_t.Flags", Field, 0}, + {"Kevent_t.Ident", Field, 0}, + {"Kevent_t.Pad_cgo_0", Field, 2}, + {"Kevent_t.Udata", Field, 0}, + {"Kill", Func, 0}, + {"Klogctl", Func, 0}, + {"Kqueue", Func, 0}, + {"LANG_ENGLISH", Const, 0}, + {"LAYERED_PROTOCOL", Const, 2}, + {"LCNT_OVERLOAD_FLUSH", Const, 1}, + {"LINUX_REBOOT_CMD_CAD_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_CAD_ON", Const, 0}, + {"LINUX_REBOOT_CMD_HALT", Const, 0}, + {"LINUX_REBOOT_CMD_KEXEC", Const, 0}, + {"LINUX_REBOOT_CMD_POWER_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART2", Const, 0}, + {"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0}, + {"LINUX_REBOOT_MAGIC1", Const, 0}, + {"LINUX_REBOOT_MAGIC2", Const, 0}, + {"LOCK_EX", Const, 0}, + {"LOCK_NB", Const, 0}, + {"LOCK_SH", Const, 0}, + {"LOCK_UN", Const, 0}, + {"LazyDLL", Type, 0}, + {"LazyDLL.Name", Field, 0}, + {"LazyProc", Type, 0}, + {"LazyProc.Name", Field, 0}, + {"Lchown", Func, 0}, + {"Linger", Type, 0}, + {"Linger.Linger", Field, 0}, + {"Linger.Onoff", Field, 0}, + {"Link", Func, 0}, + {"Listen", Func, 0}, + {"Listxattr", Func, 1}, + {"LoadCancelIoEx", Func, 1}, + {"LoadConnectEx", Func, 1}, + {"LoadCreateSymbolicLink", Func, 4}, + {"LoadDLL", Func, 0}, + {"LoadGetAddrInfo", Func, 1}, + {"LoadLibrary", Func, 0}, + {"LoadSetFileCompletionNotificationModes", Func, 2}, + {"LocalFree", Func, 0}, + {"Log2phys_t", Type, 0}, + {"Log2phys_t.Contigbytes", Field, 0}, + {"Log2phys_t.Devoffset", Field, 0}, + {"Log2phys_t.Flags", Field, 0}, + {"LookupAccountName", Func, 0}, + {"LookupAccountSid", Func, 0}, + {"LookupSID", Func, 0}, + {"LsfJump", Func, 0}, + {"LsfSocket", Func, 0}, + {"LsfStmt", Func, 0}, + {"Lstat", Func, 0}, + {"MADV_AUTOSYNC", Const, 1}, + {"MADV_CAN_REUSE", Const, 0}, + {"MADV_CORE", Const, 1}, + {"MADV_DOFORK", Const, 0}, + {"MADV_DONTFORK", Const, 0}, + {"MADV_DONTNEED", Const, 0}, + {"MADV_FREE", Const, 0}, + {"MADV_FREE_REUSABLE", Const, 0}, + {"MADV_FREE_REUSE", Const, 0}, + {"MADV_HUGEPAGE", Const, 0}, + {"MADV_HWPOISON", Const, 0}, + {"MADV_MERGEABLE", Const, 0}, + {"MADV_NOCORE", Const, 1}, + {"MADV_NOHUGEPAGE", Const, 0}, + {"MADV_NORMAL", Const, 0}, + {"MADV_NOSYNC", Const, 1}, + {"MADV_PROTECT", Const, 1}, + {"MADV_RANDOM", Const, 0}, + {"MADV_REMOVE", Const, 0}, + {"MADV_SEQUENTIAL", Const, 0}, + {"MADV_SPACEAVAIL", Const, 3}, + {"MADV_UNMERGEABLE", Const, 0}, + {"MADV_WILLNEED", Const, 0}, + {"MADV_ZERO_WIRED_PAGES", Const, 0}, + {"MAP_32BIT", Const, 0}, + {"MAP_ALIGNED_SUPER", Const, 3}, + {"MAP_ALIGNMENT_16MB", Const, 3}, + {"MAP_ALIGNMENT_1TB", Const, 3}, + {"MAP_ALIGNMENT_256TB", Const, 3}, + {"MAP_ALIGNMENT_4GB", Const, 3}, + {"MAP_ALIGNMENT_64KB", Const, 3}, + {"MAP_ALIGNMENT_64PB", Const, 3}, + {"MAP_ALIGNMENT_MASK", Const, 3}, + {"MAP_ALIGNMENT_SHIFT", Const, 3}, + {"MAP_ANON", Const, 0}, + {"MAP_ANONYMOUS", Const, 0}, + {"MAP_COPY", Const, 0}, + {"MAP_DENYWRITE", Const, 0}, + {"MAP_EXECUTABLE", Const, 0}, + {"MAP_FILE", Const, 0}, + {"MAP_FIXED", Const, 0}, + {"MAP_FLAGMASK", Const, 3}, + {"MAP_GROWSDOWN", Const, 0}, + {"MAP_HASSEMAPHORE", Const, 0}, + {"MAP_HUGETLB", Const, 0}, + {"MAP_INHERIT", Const, 3}, + {"MAP_INHERIT_COPY", Const, 3}, + {"MAP_INHERIT_DEFAULT", Const, 3}, + {"MAP_INHERIT_DONATE_COPY", Const, 3}, + {"MAP_INHERIT_NONE", Const, 3}, + {"MAP_INHERIT_SHARE", Const, 3}, + {"MAP_JIT", Const, 0}, + {"MAP_LOCKED", Const, 0}, + {"MAP_NOCACHE", Const, 0}, + {"MAP_NOCORE", Const, 1}, + {"MAP_NOEXTEND", Const, 0}, + {"MAP_NONBLOCK", Const, 0}, + 
{"MAP_NORESERVE", Const, 0}, + {"MAP_NOSYNC", Const, 1}, + {"MAP_POPULATE", Const, 0}, + {"MAP_PREFAULT_READ", Const, 1}, + {"MAP_PRIVATE", Const, 0}, + {"MAP_RENAME", Const, 0}, + {"MAP_RESERVED0080", Const, 0}, + {"MAP_RESERVED0100", Const, 1}, + {"MAP_SHARED", Const, 0}, + {"MAP_STACK", Const, 0}, + {"MAP_TRYFIXED", Const, 3}, + {"MAP_TYPE", Const, 0}, + {"MAP_WIRED", Const, 3}, + {"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4}, + {"MAXLEN_IFDESCR", Const, 0}, + {"MAXLEN_PHYSADDR", Const, 0}, + {"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0}, + {"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0}, + {"MAX_ADAPTER_NAME_LENGTH", Const, 0}, + {"MAX_COMPUTERNAME_LENGTH", Const, 0}, + {"MAX_INTERFACE_NAME_LEN", Const, 0}, + {"MAX_LONG_PATH", Const, 0}, + {"MAX_PATH", Const, 0}, + {"MAX_PROTOCOL_CHAIN", Const, 2}, + {"MCL_CURRENT", Const, 0}, + {"MCL_FUTURE", Const, 0}, + {"MNT_DETACH", Const, 0}, + {"MNT_EXPIRE", Const, 0}, + {"MNT_FORCE", Const, 0}, + {"MSG_BCAST", Const, 1}, + {"MSG_CMSG_CLOEXEC", Const, 0}, + {"MSG_COMPAT", Const, 0}, + {"MSG_CONFIRM", Const, 0}, + {"MSG_CONTROLMBUF", Const, 1}, + {"MSG_CTRUNC", Const, 0}, + {"MSG_DONTROUTE", Const, 0}, + {"MSG_DONTWAIT", Const, 0}, + {"MSG_EOF", Const, 0}, + {"MSG_EOR", Const, 0}, + {"MSG_ERRQUEUE", Const, 0}, + {"MSG_FASTOPEN", Const, 1}, + {"MSG_FIN", Const, 0}, + {"MSG_FLUSH", Const, 0}, + {"MSG_HAVEMORE", Const, 0}, + {"MSG_HOLD", Const, 0}, + {"MSG_IOVUSRSPACE", Const, 1}, + {"MSG_LENUSRSPACE", Const, 1}, + {"MSG_MCAST", Const, 1}, + {"MSG_MORE", Const, 0}, + {"MSG_NAMEMBUF", Const, 1}, + {"MSG_NBIO", Const, 0}, + {"MSG_NEEDSA", Const, 0}, + {"MSG_NOSIGNAL", Const, 0}, + {"MSG_NOTIFICATION", Const, 0}, + {"MSG_OOB", Const, 0}, + {"MSG_PEEK", Const, 0}, + {"MSG_PROXY", Const, 0}, + {"MSG_RCVMORE", Const, 0}, + {"MSG_RST", Const, 0}, + {"MSG_SEND", Const, 0}, + {"MSG_SYN", Const, 0}, + {"MSG_TRUNC", Const, 0}, + {"MSG_TRYHARD", Const, 0}, + {"MSG_USERFLAGS", Const, 1}, + {"MSG_WAITALL", Const, 0}, + {"MSG_WAITFORONE", Const, 0}, + {"MSG_WAITSTREAM", Const, 0}, + {"MS_ACTIVE", Const, 0}, + {"MS_ASYNC", Const, 0}, + {"MS_BIND", Const, 0}, + {"MS_DEACTIVATE", Const, 0}, + {"MS_DIRSYNC", Const, 0}, + {"MS_INVALIDATE", Const, 0}, + {"MS_I_VERSION", Const, 0}, + {"MS_KERNMOUNT", Const, 0}, + {"MS_KILLPAGES", Const, 0}, + {"MS_MANDLOCK", Const, 0}, + {"MS_MGC_MSK", Const, 0}, + {"MS_MGC_VAL", Const, 0}, + {"MS_MOVE", Const, 0}, + {"MS_NOATIME", Const, 0}, + {"MS_NODEV", Const, 0}, + {"MS_NODIRATIME", Const, 0}, + {"MS_NOEXEC", Const, 0}, + {"MS_NOSUID", Const, 0}, + {"MS_NOUSER", Const, 0}, + {"MS_POSIXACL", Const, 0}, + {"MS_PRIVATE", Const, 0}, + {"MS_RDONLY", Const, 0}, + {"MS_REC", Const, 0}, + {"MS_RELATIME", Const, 0}, + {"MS_REMOUNT", Const, 0}, + {"MS_RMT_MASK", Const, 0}, + {"MS_SHARED", Const, 0}, + {"MS_SILENT", Const, 0}, + {"MS_SLAVE", Const, 0}, + {"MS_STRICTATIME", Const, 0}, + {"MS_SYNC", Const, 0}, + {"MS_SYNCHRONOUS", Const, 0}, + {"MS_UNBINDABLE", Const, 0}, + {"Madvise", Func, 0}, + {"MapViewOfFile", Func, 0}, + {"MaxTokenInfoClass", Const, 0}, + {"Mclpool", Type, 2}, + {"Mclpool.Alive", Field, 2}, + {"Mclpool.Cwm", Field, 2}, + {"Mclpool.Grown", Field, 2}, + {"Mclpool.Hwm", Field, 2}, + {"Mclpool.Lwm", Field, 2}, + {"MibIfRow", Type, 0}, + {"MibIfRow.AdminStatus", Field, 0}, + {"MibIfRow.Descr", Field, 0}, + {"MibIfRow.DescrLen", Field, 0}, + {"MibIfRow.InDiscards", Field, 0}, + {"MibIfRow.InErrors", Field, 0}, + {"MibIfRow.InNUcastPkts", Field, 0}, + {"MibIfRow.InOctets", Field, 0}, + {"MibIfRow.InUcastPkts", Field, 0}, + 
{"MibIfRow.InUnknownProtos", Field, 0}, + {"MibIfRow.Index", Field, 0}, + {"MibIfRow.LastChange", Field, 0}, + {"MibIfRow.Mtu", Field, 0}, + {"MibIfRow.Name", Field, 0}, + {"MibIfRow.OperStatus", Field, 0}, + {"MibIfRow.OutDiscards", Field, 0}, + {"MibIfRow.OutErrors", Field, 0}, + {"MibIfRow.OutNUcastPkts", Field, 0}, + {"MibIfRow.OutOctets", Field, 0}, + {"MibIfRow.OutQLen", Field, 0}, + {"MibIfRow.OutUcastPkts", Field, 0}, + {"MibIfRow.PhysAddr", Field, 0}, + {"MibIfRow.PhysAddrLen", Field, 0}, + {"MibIfRow.Speed", Field, 0}, + {"MibIfRow.Type", Field, 0}, + {"Mkdir", Func, 0}, + {"Mkdirat", Func, 0}, + {"Mkfifo", Func, 0}, + {"Mknod", Func, 0}, + {"Mknodat", Func, 0}, + {"Mlock", Func, 0}, + {"Mlockall", Func, 0}, + {"Mmap", Func, 0}, + {"Mount", Func, 0}, + {"MoveFile", Func, 0}, + {"Mprotect", Func, 0}, + {"Msghdr", Type, 0}, + {"Msghdr.Control", Field, 0}, + {"Msghdr.Controllen", Field, 0}, + {"Msghdr.Flags", Field, 0}, + {"Msghdr.Iov", Field, 0}, + {"Msghdr.Iovlen", Field, 0}, + {"Msghdr.Name", Field, 0}, + {"Msghdr.Namelen", Field, 0}, + {"Msghdr.Pad_cgo_0", Field, 0}, + {"Msghdr.Pad_cgo_1", Field, 0}, + {"Munlock", Func, 0}, + {"Munlockall", Func, 0}, + {"Munmap", Func, 0}, + {"MustLoadDLL", Func, 0}, + {"NAME_MAX", Const, 0}, + {"NETLINK_ADD_MEMBERSHIP", Const, 0}, + {"NETLINK_AUDIT", Const, 0}, + {"NETLINK_BROADCAST_ERROR", Const, 0}, + {"NETLINK_CONNECTOR", Const, 0}, + {"NETLINK_DNRTMSG", Const, 0}, + {"NETLINK_DROP_MEMBERSHIP", Const, 0}, + {"NETLINK_ECRYPTFS", Const, 0}, + {"NETLINK_FIB_LOOKUP", Const, 0}, + {"NETLINK_FIREWALL", Const, 0}, + {"NETLINK_GENERIC", Const, 0}, + {"NETLINK_INET_DIAG", Const, 0}, + {"NETLINK_IP6_FW", Const, 0}, + {"NETLINK_ISCSI", Const, 0}, + {"NETLINK_KOBJECT_UEVENT", Const, 0}, + {"NETLINK_NETFILTER", Const, 0}, + {"NETLINK_NFLOG", Const, 0}, + {"NETLINK_NO_ENOBUFS", Const, 0}, + {"NETLINK_PKTINFO", Const, 0}, + {"NETLINK_RDMA", Const, 0}, + {"NETLINK_ROUTE", Const, 0}, + {"NETLINK_SCSITRANSPORT", Const, 0}, + {"NETLINK_SELINUX", Const, 0}, + {"NETLINK_UNUSED", Const, 0}, + {"NETLINK_USERSOCK", Const, 0}, + {"NETLINK_XFRM", Const, 0}, + {"NET_RT_DUMP", Const, 0}, + {"NET_RT_DUMP2", Const, 0}, + {"NET_RT_FLAGS", Const, 0}, + {"NET_RT_IFLIST", Const, 0}, + {"NET_RT_IFLIST2", Const, 0}, + {"NET_RT_IFLISTL", Const, 1}, + {"NET_RT_IFMALIST", Const, 0}, + {"NET_RT_MAXID", Const, 0}, + {"NET_RT_OIFLIST", Const, 1}, + {"NET_RT_OOIFLIST", Const, 1}, + {"NET_RT_STAT", Const, 0}, + {"NET_RT_STATS", Const, 1}, + {"NET_RT_TABLE", Const, 1}, + {"NET_RT_TRASH", Const, 0}, + {"NLA_ALIGNTO", Const, 0}, + {"NLA_F_NESTED", Const, 0}, + {"NLA_F_NET_BYTEORDER", Const, 0}, + {"NLA_HDRLEN", Const, 0}, + {"NLMSG_ALIGNTO", Const, 0}, + {"NLMSG_DONE", Const, 0}, + {"NLMSG_ERROR", Const, 0}, + {"NLMSG_HDRLEN", Const, 0}, + {"NLMSG_MIN_TYPE", Const, 0}, + {"NLMSG_NOOP", Const, 0}, + {"NLMSG_OVERRUN", Const, 0}, + {"NLM_F_ACK", Const, 0}, + {"NLM_F_APPEND", Const, 0}, + {"NLM_F_ATOMIC", Const, 0}, + {"NLM_F_CREATE", Const, 0}, + {"NLM_F_DUMP", Const, 0}, + {"NLM_F_ECHO", Const, 0}, + {"NLM_F_EXCL", Const, 0}, + {"NLM_F_MATCH", Const, 0}, + {"NLM_F_MULTI", Const, 0}, + {"NLM_F_REPLACE", Const, 0}, + {"NLM_F_REQUEST", Const, 0}, + {"NLM_F_ROOT", Const, 0}, + {"NOFLSH", Const, 0}, + {"NOTE_ABSOLUTE", Const, 0}, + {"NOTE_ATTRIB", Const, 0}, + {"NOTE_BACKGROUND", Const, 16}, + {"NOTE_CHILD", Const, 0}, + {"NOTE_CRITICAL", Const, 16}, + {"NOTE_DELETE", Const, 0}, + {"NOTE_EOF", Const, 1}, + {"NOTE_EXEC", Const, 0}, + {"NOTE_EXIT", Const, 0}, + {"NOTE_EXITSTATUS", Const, 0}, + 
{"NOTE_EXIT_CSERROR", Const, 16}, + {"NOTE_EXIT_DECRYPTFAIL", Const, 16}, + {"NOTE_EXIT_DETAIL", Const, 16}, + {"NOTE_EXIT_DETAIL_MASK", Const, 16}, + {"NOTE_EXIT_MEMORY", Const, 16}, + {"NOTE_EXIT_REPARENTED", Const, 16}, + {"NOTE_EXTEND", Const, 0}, + {"NOTE_FFAND", Const, 0}, + {"NOTE_FFCOPY", Const, 0}, + {"NOTE_FFCTRLMASK", Const, 0}, + {"NOTE_FFLAGSMASK", Const, 0}, + {"NOTE_FFNOP", Const, 0}, + {"NOTE_FFOR", Const, 0}, + {"NOTE_FORK", Const, 0}, + {"NOTE_LEEWAY", Const, 16}, + {"NOTE_LINK", Const, 0}, + {"NOTE_LOWAT", Const, 0}, + {"NOTE_NONE", Const, 0}, + {"NOTE_NSECONDS", Const, 0}, + {"NOTE_PCTRLMASK", Const, 0}, + {"NOTE_PDATAMASK", Const, 0}, + {"NOTE_REAP", Const, 0}, + {"NOTE_RENAME", Const, 0}, + {"NOTE_RESOURCEEND", Const, 0}, + {"NOTE_REVOKE", Const, 0}, + {"NOTE_SECONDS", Const, 0}, + {"NOTE_SIGNAL", Const, 0}, + {"NOTE_TRACK", Const, 0}, + {"NOTE_TRACKERR", Const, 0}, + {"NOTE_TRIGGER", Const, 0}, + {"NOTE_TRUNCATE", Const, 1}, + {"NOTE_USECONDS", Const, 0}, + {"NOTE_VM_ERROR", Const, 0}, + {"NOTE_VM_PRESSURE", Const, 0}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0}, + {"NOTE_VM_PRESSURE_TERMINATE", Const, 0}, + {"NOTE_WRITE", Const, 0}, + {"NameCanonical", Const, 0}, + {"NameCanonicalEx", Const, 0}, + {"NameDisplay", Const, 0}, + {"NameDnsDomain", Const, 0}, + {"NameFullyQualifiedDN", Const, 0}, + {"NameSamCompatible", Const, 0}, + {"NameServicePrincipal", Const, 0}, + {"NameUniqueId", Const, 0}, + {"NameUnknown", Const, 0}, + {"NameUserPrincipal", Const, 0}, + {"Nanosleep", Func, 0}, + {"NetApiBufferFree", Func, 0}, + {"NetGetJoinInformation", Func, 2}, + {"NetSetupDomainName", Const, 2}, + {"NetSetupUnjoined", Const, 2}, + {"NetSetupUnknownStatus", Const, 2}, + {"NetSetupWorkgroupName", Const, 2}, + {"NetUserGetInfo", Func, 0}, + {"NetlinkMessage", Type, 0}, + {"NetlinkMessage.Data", Field, 0}, + {"NetlinkMessage.Header", Field, 0}, + {"NetlinkRIB", Func, 0}, + {"NetlinkRouteAttr", Type, 0}, + {"NetlinkRouteAttr.Attr", Field, 0}, + {"NetlinkRouteAttr.Value", Field, 0}, + {"NetlinkRouteRequest", Type, 0}, + {"NetlinkRouteRequest.Data", Field, 0}, + {"NetlinkRouteRequest.Header", Field, 0}, + {"NewCallback", Func, 0}, + {"NewCallbackCDecl", Func, 3}, + {"NewLazyDLL", Func, 0}, + {"NlAttr", Type, 0}, + {"NlAttr.Len", Field, 0}, + {"NlAttr.Type", Field, 0}, + {"NlMsgerr", Type, 0}, + {"NlMsgerr.Error", Field, 0}, + {"NlMsgerr.Msg", Field, 0}, + {"NlMsghdr", Type, 0}, + {"NlMsghdr.Flags", Field, 0}, + {"NlMsghdr.Len", Field, 0}, + {"NlMsghdr.Pid", Field, 0}, + {"NlMsghdr.Seq", Field, 0}, + {"NlMsghdr.Type", Field, 0}, + {"NsecToFiletime", Func, 0}, + {"NsecToTimespec", Func, 0}, + {"NsecToTimeval", Func, 0}, + {"Ntohs", Func, 0}, + {"OCRNL", Const, 0}, + {"OFDEL", Const, 0}, + {"OFILL", Const, 0}, + {"OFIOGETBMAP", Const, 1}, + {"OID_PKIX_KP_SERVER_AUTH", Var, 0}, + {"OID_SERVER_GATED_CRYPTO", Var, 0}, + {"OID_SGC_NETSCAPE", Var, 0}, + {"OLCUC", Const, 0}, + {"ONLCR", Const, 0}, + {"ONLRET", Const, 0}, + {"ONOCR", Const, 0}, + {"ONOEOT", Const, 1}, + {"OPEN_ALWAYS", Const, 0}, + {"OPEN_EXISTING", Const, 0}, + {"OPOST", Const, 0}, + {"O_ACCMODE", Const, 0}, + {"O_ALERT", Const, 0}, + {"O_ALT_IO", Const, 1}, + {"O_APPEND", Const, 0}, + {"O_ASYNC", Const, 0}, + {"O_CLOEXEC", Const, 0}, + {"O_CREAT", Const, 0}, + {"O_DIRECT", Const, 0}, + {"O_DIRECTORY", Const, 0}, + {"O_DP_GETRAWENCRYPTED", Const, 16}, + {"O_DSYNC", Const, 0}, + {"O_EVTONLY", Const, 0}, + {"O_EXCL", Const, 0}, + {"O_EXEC", Const, 0}, + {"O_EXLOCK", Const, 0}, + {"O_FSYNC", Const, 0}, + {"O_LARGEFILE", 
Const, 0}, + {"O_NDELAY", Const, 0}, + {"O_NOATIME", Const, 0}, + {"O_NOCTTY", Const, 0}, + {"O_NOFOLLOW", Const, 0}, + {"O_NONBLOCK", Const, 0}, + {"O_NOSIGPIPE", Const, 1}, + {"O_POPUP", Const, 0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_RSYNC", Const, 0}, + {"O_SHLOCK", Const, 0}, + {"O_SYMLINK", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_TTY_INIT", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenCurrentProcessToken", Func, 0}, + {"OpenProcess", Func, 0}, + {"OpenProcessToken", Func, 0}, + {"Openat", Func, 0}, + {"Overlapped", Type, 0}, + {"Overlapped.HEvent", Field, 0}, + {"Overlapped.Internal", Field, 0}, + {"Overlapped.InternalHigh", Field, 0}, + {"Overlapped.Offset", Field, 0}, + {"Overlapped.OffsetHigh", Field, 0}, + {"PACKET_ADD_MEMBERSHIP", Const, 0}, + {"PACKET_BROADCAST", Const, 0}, + {"PACKET_DROP_MEMBERSHIP", Const, 0}, + {"PACKET_FASTROUTE", Const, 0}, + {"PACKET_HOST", Const, 0}, + {"PACKET_LOOPBACK", Const, 0}, + {"PACKET_MR_ALLMULTI", Const, 0}, + {"PACKET_MR_MULTICAST", Const, 0}, + {"PACKET_MR_PROMISC", Const, 0}, + {"PACKET_MULTICAST", Const, 0}, + {"PACKET_OTHERHOST", Const, 0}, + {"PACKET_OUTGOING", Const, 0}, + {"PACKET_RECV_OUTPUT", Const, 0}, + {"PACKET_RX_RING", Const, 0}, + {"PACKET_STATISTICS", Const, 0}, + {"PAGE_EXECUTE_READ", Const, 0}, + {"PAGE_EXECUTE_READWRITE", Const, 0}, + {"PAGE_EXECUTE_WRITECOPY", Const, 0}, + {"PAGE_READONLY", Const, 0}, + {"PAGE_READWRITE", Const, 0}, + {"PAGE_WRITECOPY", Const, 0}, + {"PARENB", Const, 0}, + {"PARMRK", Const, 0}, + {"PARODD", Const, 0}, + {"PENDIN", Const, 0}, + {"PFL_HIDDEN", Const, 2}, + {"PFL_MATCHES_PROTOCOL_ZERO", Const, 2}, + {"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2}, + {"PFL_NETWORKDIRECT_PROVIDER", Const, 2}, + {"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2}, + {"PF_FLUSH", Const, 1}, + {"PKCS_7_ASN_ENCODING", Const, 0}, + {"PMC5_PIPELINE_FLUSH", Const, 1}, + {"PRIO_PGRP", Const, 2}, + {"PRIO_PROCESS", Const, 2}, + {"PRIO_USER", Const, 2}, + {"PRI_IOFLUSH", Const, 1}, + {"PROCESS_QUERY_INFORMATION", Const, 0}, + {"PROCESS_TERMINATE", Const, 2}, + {"PROT_EXEC", Const, 0}, + {"PROT_GROWSDOWN", Const, 0}, + {"PROT_GROWSUP", Const, 0}, + {"PROT_NONE", Const, 0}, + {"PROT_READ", Const, 0}, + {"PROT_WRITE", Const, 0}, + {"PROV_DH_SCHANNEL", Const, 0}, + {"PROV_DSS", Const, 0}, + {"PROV_DSS_DH", Const, 0}, + {"PROV_EC_ECDSA_FULL", Const, 0}, + {"PROV_EC_ECDSA_SIG", Const, 0}, + {"PROV_EC_ECNRA_FULL", Const, 0}, + {"PROV_EC_ECNRA_SIG", Const, 0}, + {"PROV_FORTEZZA", Const, 0}, + {"PROV_INTEL_SEC", Const, 0}, + {"PROV_MS_EXCHANGE", Const, 0}, + {"PROV_REPLACE_OWF", Const, 0}, + {"PROV_RNG", Const, 0}, + {"PROV_RSA_AES", Const, 0}, + {"PROV_RSA_FULL", Const, 0}, + {"PROV_RSA_SCHANNEL", Const, 0}, + {"PROV_RSA_SIG", Const, 0}, + {"PROV_SPYRUS_LYNKS", Const, 0}, + {"PROV_SSL", Const, 0}, + {"PR_CAPBSET_DROP", Const, 0}, + {"PR_CAPBSET_READ", Const, 0}, + {"PR_CLEAR_SECCOMP_FILTER", Const, 0}, + {"PR_ENDIAN_BIG", Const, 0}, + {"PR_ENDIAN_LITTLE", Const, 0}, + {"PR_ENDIAN_PPC_LITTLE", Const, 0}, + {"PR_FPEMU_NOPRINT", Const, 0}, + {"PR_FPEMU_SIGFPE", Const, 0}, + {"PR_FP_EXC_ASYNC", Const, 0}, + {"PR_FP_EXC_DISABLED", Const, 0}, + {"PR_FP_EXC_DIV", Const, 0}, + {"PR_FP_EXC_INV", Const, 0}, + {"PR_FP_EXC_NONRECOV", Const, 0}, + {"PR_FP_EXC_OVF", Const, 0}, + {"PR_FP_EXC_PRECISE", Const, 0}, + {"PR_FP_EXC_RES", Const, 0}, + {"PR_FP_EXC_SW_ENABLE", Const, 0}, + {"PR_FP_EXC_UND", Const, 0}, + {"PR_GET_DUMPABLE", Const, 0}, + {"PR_GET_ENDIAN", Const, 0}, + {"PR_GET_FPEMU", 
Const, 0}, + {"PR_GET_FPEXC", Const, 0}, + {"PR_GET_KEEPCAPS", Const, 0}, + {"PR_GET_NAME", Const, 0}, + {"PR_GET_PDEATHSIG", Const, 0}, + {"PR_GET_SECCOMP", Const, 0}, + {"PR_GET_SECCOMP_FILTER", Const, 0}, + {"PR_GET_SECUREBITS", Const, 0}, + {"PR_GET_TIMERSLACK", Const, 0}, + {"PR_GET_TIMING", Const, 0}, + {"PR_GET_TSC", Const, 0}, + {"PR_GET_UNALIGN", Const, 0}, + {"PR_MCE_KILL", Const, 0}, + {"PR_MCE_KILL_CLEAR", Const, 0}, + {"PR_MCE_KILL_DEFAULT", Const, 0}, + {"PR_MCE_KILL_EARLY", Const, 0}, + {"PR_MCE_KILL_GET", Const, 0}, + {"PR_MCE_KILL_LATE", Const, 0}, + {"PR_MCE_KILL_SET", Const, 0}, + {"PR_SECCOMP_FILTER_EVENT", Const, 0}, + {"PR_SECCOMP_FILTER_SYSCALL", Const, 0}, + {"PR_SET_DUMPABLE", Const, 0}, + {"PR_SET_ENDIAN", Const, 0}, + {"PR_SET_FPEMU", Const, 0}, + {"PR_SET_FPEXC", Const, 0}, + {"PR_SET_KEEPCAPS", Const, 0}, + {"PR_SET_NAME", Const, 0}, + {"PR_SET_PDEATHSIG", Const, 0}, + {"PR_SET_PTRACER", Const, 0}, + {"PR_SET_SECCOMP", Const, 0}, + {"PR_SET_SECCOMP_FILTER", Const, 0}, + {"PR_SET_SECUREBITS", Const, 0}, + {"PR_SET_TIMERSLACK", Const, 0}, + {"PR_SET_TIMING", Const, 0}, + {"PR_SET_TSC", Const, 0}, + {"PR_SET_UNALIGN", Const, 0}, + {"PR_TASK_PERF_EVENTS_DISABLE", Const, 0}, + {"PR_TASK_PERF_EVENTS_ENABLE", Const, 0}, + {"PR_TIMING_STATISTICAL", Const, 0}, + {"PR_TIMING_TIMESTAMP", Const, 0}, + {"PR_TSC_ENABLE", Const, 0}, + {"PR_TSC_SIGSEGV", Const, 0}, + {"PR_UNALIGN_NOPRINT", Const, 0}, + {"PR_UNALIGN_SIGBUS", Const, 0}, + {"PTRACE_ARCH_PRCTL", Const, 0}, + {"PTRACE_ATTACH", Const, 0}, + {"PTRACE_CONT", Const, 0}, + {"PTRACE_DETACH", Const, 0}, + {"PTRACE_EVENT_CLONE", Const, 0}, + {"PTRACE_EVENT_EXEC", Const, 0}, + {"PTRACE_EVENT_EXIT", Const, 0}, + {"PTRACE_EVENT_FORK", Const, 0}, + {"PTRACE_EVENT_VFORK", Const, 0}, + {"PTRACE_EVENT_VFORK_DONE", Const, 0}, + {"PTRACE_GETCRUNCHREGS", Const, 0}, + {"PTRACE_GETEVENTMSG", Const, 0}, + {"PTRACE_GETFPREGS", Const, 0}, + {"PTRACE_GETFPXREGS", Const, 0}, + {"PTRACE_GETHBPREGS", Const, 0}, + {"PTRACE_GETREGS", Const, 0}, + {"PTRACE_GETREGSET", Const, 0}, + {"PTRACE_GETSIGINFO", Const, 0}, + {"PTRACE_GETVFPREGS", Const, 0}, + {"PTRACE_GETWMMXREGS", Const, 0}, + {"PTRACE_GET_THREAD_AREA", Const, 0}, + {"PTRACE_KILL", Const, 0}, + {"PTRACE_OLDSETOPTIONS", Const, 0}, + {"PTRACE_O_MASK", Const, 0}, + {"PTRACE_O_TRACECLONE", Const, 0}, + {"PTRACE_O_TRACEEXEC", Const, 0}, + {"PTRACE_O_TRACEEXIT", Const, 0}, + {"PTRACE_O_TRACEFORK", Const, 0}, + {"PTRACE_O_TRACESYSGOOD", Const, 0}, + {"PTRACE_O_TRACEVFORK", Const, 0}, + {"PTRACE_O_TRACEVFORKDONE", Const, 0}, + {"PTRACE_PEEKDATA", Const, 0}, + {"PTRACE_PEEKTEXT", Const, 0}, + {"PTRACE_PEEKUSR", Const, 0}, + {"PTRACE_POKEDATA", Const, 0}, + {"PTRACE_POKETEXT", Const, 0}, + {"PTRACE_POKEUSR", Const, 0}, + {"PTRACE_SETCRUNCHREGS", Const, 0}, + {"PTRACE_SETFPREGS", Const, 0}, + {"PTRACE_SETFPXREGS", Const, 0}, + {"PTRACE_SETHBPREGS", Const, 0}, + {"PTRACE_SETOPTIONS", Const, 0}, + {"PTRACE_SETREGS", Const, 0}, + {"PTRACE_SETREGSET", Const, 0}, + {"PTRACE_SETSIGINFO", Const, 0}, + {"PTRACE_SETVFPREGS", Const, 0}, + {"PTRACE_SETWMMXREGS", Const, 0}, + {"PTRACE_SET_SYSCALL", Const, 0}, + {"PTRACE_SET_THREAD_AREA", Const, 0}, + {"PTRACE_SINGLEBLOCK", Const, 0}, + {"PTRACE_SINGLESTEP", Const, 0}, + {"PTRACE_SYSCALL", Const, 0}, + {"PTRACE_SYSEMU", Const, 0}, + {"PTRACE_SYSEMU_SINGLESTEP", Const, 0}, + {"PTRACE_TRACEME", Const, 0}, + {"PT_ATTACH", Const, 0}, + {"PT_ATTACHEXC", Const, 0}, + {"PT_CONTINUE", Const, 0}, + {"PT_DATA_ADDR", Const, 0}, + {"PT_DENY_ATTACH", Const, 0}, + 
{"PT_DETACH", Const, 0}, + {"PT_FIRSTMACH", Const, 0}, + {"PT_FORCEQUOTA", Const, 0}, + {"PT_KILL", Const, 0}, + {"PT_MASK", Const, 1}, + {"PT_READ_D", Const, 0}, + {"PT_READ_I", Const, 0}, + {"PT_READ_U", Const, 0}, + {"PT_SIGEXC", Const, 0}, + {"PT_STEP", Const, 0}, + {"PT_TEXT_ADDR", Const, 0}, + {"PT_TEXT_END_ADDR", Const, 0}, + {"PT_THUPDATE", Const, 0}, + {"PT_TRACE_ME", Const, 0}, + {"PT_WRITE_D", Const, 0}, + {"PT_WRITE_I", Const, 0}, + {"PT_WRITE_U", Const, 0}, + {"ParseDirent", Func, 0}, + {"ParseNetlinkMessage", Func, 0}, + {"ParseNetlinkRouteAttr", Func, 0}, + {"ParseRoutingMessage", Func, 0}, + {"ParseRoutingSockaddr", Func, 0}, + {"ParseSocketControlMessage", Func, 0}, + {"ParseUnixCredentials", Func, 0}, + {"ParseUnixRights", Func, 0}, + {"PathMax", Const, 0}, + {"Pathconf", Func, 0}, + {"Pause", Func, 0}, + {"Pipe", Func, 0}, + {"Pipe2", Func, 1}, + {"PivotRoot", Func, 0}, + {"Pointer", Type, 11}, + {"PostQueuedCompletionStatus", Func, 0}, + {"Pread", Func, 0}, + {"Proc", Type, 0}, + {"Proc.Dll", Field, 0}, + {"Proc.Name", Field, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process32First", Func, 4}, + {"Process32Next", Func, 4}, + {"ProcessEntry32", Type, 4}, + {"ProcessEntry32.DefaultHeapID", Field, 4}, + {"ProcessEntry32.ExeFile", Field, 4}, + {"ProcessEntry32.Flags", Field, 4}, + {"ProcessEntry32.ModuleID", Field, 4}, + {"ProcessEntry32.ParentProcessID", Field, 4}, + {"ProcessEntry32.PriClassBase", Field, 4}, + {"ProcessEntry32.ProcessID", Field, 4}, + {"ProcessEntry32.Size", Field, 4}, + {"ProcessEntry32.Threads", Field, 4}, + {"ProcessEntry32.Usage", Field, 4}, + {"ProcessInformation", Type, 0}, + {"ProcessInformation.Process", Field, 0}, + {"ProcessInformation.ProcessId", Field, 0}, + {"ProcessInformation.Thread", Field, 0}, + {"ProcessInformation.ThreadId", Field, 0}, + {"Protoent", Type, 0}, + {"Protoent.Aliases", Field, 0}, + {"Protoent.Name", Field, 0}, + {"Protoent.Proto", Field, 0}, + {"PtraceAttach", Func, 0}, + {"PtraceCont", Func, 0}, + {"PtraceDetach", Func, 0}, + {"PtraceGetEventMsg", Func, 0}, + {"PtraceGetRegs", Func, 0}, + {"PtracePeekData", Func, 0}, + {"PtracePeekText", Func, 0}, + {"PtracePokeData", Func, 0}, + {"PtracePokeText", Func, 0}, + {"PtraceRegs", Type, 0}, + {"PtraceRegs.Cs", Field, 0}, + {"PtraceRegs.Ds", Field, 0}, + {"PtraceRegs.Eax", Field, 0}, + {"PtraceRegs.Ebp", Field, 0}, + {"PtraceRegs.Ebx", Field, 0}, + {"PtraceRegs.Ecx", Field, 0}, + {"PtraceRegs.Edi", Field, 0}, + {"PtraceRegs.Edx", Field, 0}, + {"PtraceRegs.Eflags", Field, 0}, + {"PtraceRegs.Eip", Field, 0}, + {"PtraceRegs.Es", Field, 0}, + {"PtraceRegs.Esi", Field, 0}, + {"PtraceRegs.Esp", Field, 0}, + {"PtraceRegs.Fs", Field, 0}, + {"PtraceRegs.Fs_base", Field, 0}, + {"PtraceRegs.Gs", Field, 0}, + {"PtraceRegs.Gs_base", Field, 0}, + {"PtraceRegs.Orig_eax", Field, 0}, + {"PtraceRegs.Orig_rax", Field, 0}, + {"PtraceRegs.R10", Field, 0}, + {"PtraceRegs.R11", Field, 0}, + {"PtraceRegs.R12", Field, 0}, + {"PtraceRegs.R13", Field, 0}, + {"PtraceRegs.R14", Field, 0}, + {"PtraceRegs.R15", Field, 0}, + {"PtraceRegs.R8", Field, 0}, + {"PtraceRegs.R9", Field, 0}, + {"PtraceRegs.Rax", Field, 0}, + {"PtraceRegs.Rbp", Field, 0}, + {"PtraceRegs.Rbx", Field, 0}, + {"PtraceRegs.Rcx", Field, 0}, + {"PtraceRegs.Rdi", Field, 0}, + {"PtraceRegs.Rdx", Field, 0}, + {"PtraceRegs.Rip", Field, 0}, + {"PtraceRegs.Rsi", Field, 0}, + {"PtraceRegs.Rsp", Field, 0}, + {"PtraceRegs.Ss", Field, 0}, + 
{"PtraceRegs.Uregs", Field, 0}, + {"PtraceRegs.Xcs", Field, 0}, + {"PtraceRegs.Xds", Field, 0}, + {"PtraceRegs.Xes", Field, 0}, + {"PtraceRegs.Xfs", Field, 0}, + {"PtraceRegs.Xgs", Field, 0}, + {"PtraceRegs.Xss", Field, 0}, + {"PtraceSetOptions", Func, 0}, + {"PtraceSetRegs", Func, 0}, + {"PtraceSingleStep", Func, 0}, + {"PtraceSyscall", Func, 1}, + {"Pwrite", Func, 0}, + {"REG_BINARY", Const, 0}, + {"REG_DWORD", Const, 0}, + {"REG_DWORD_BIG_ENDIAN", Const, 0}, + {"REG_DWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_EXPAND_SZ", Const, 0}, + {"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0}, + {"REG_LINK", Const, 0}, + {"REG_MULTI_SZ", Const, 0}, + {"REG_NONE", Const, 0}, + {"REG_QWORD", Const, 0}, + {"REG_QWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_RESOURCE_LIST", Const, 0}, + {"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0}, + {"REG_SZ", Const, 0}, + {"RLIMIT_AS", Const, 0}, + {"RLIMIT_CORE", Const, 0}, + {"RLIMIT_CPU", Const, 0}, + {"RLIMIT_CPU_USAGE_MONITOR", Const, 16}, + {"RLIMIT_DATA", Const, 0}, + {"RLIMIT_FSIZE", Const, 0}, + {"RLIMIT_NOFILE", Const, 0}, + {"RLIMIT_STACK", Const, 0}, + {"RLIM_INFINITY", Const, 0}, + {"RTAX_ADVMSS", Const, 0}, + {"RTAX_AUTHOR", Const, 0}, + {"RTAX_BRD", Const, 0}, + {"RTAX_CWND", Const, 0}, + {"RTAX_DST", Const, 0}, + {"RTAX_FEATURES", Const, 0}, + {"RTAX_FEATURE_ALLFRAG", Const, 0}, + {"RTAX_FEATURE_ECN", Const, 0}, + {"RTAX_FEATURE_SACK", Const, 0}, + {"RTAX_FEATURE_TIMESTAMP", Const, 0}, + {"RTAX_GATEWAY", Const, 0}, + {"RTAX_GENMASK", Const, 0}, + {"RTAX_HOPLIMIT", Const, 0}, + {"RTAX_IFA", Const, 0}, + {"RTAX_IFP", Const, 0}, + {"RTAX_INITCWND", Const, 0}, + {"RTAX_INITRWND", Const, 0}, + {"RTAX_LABEL", Const, 1}, + {"RTAX_LOCK", Const, 0}, + {"RTAX_MAX", Const, 0}, + {"RTAX_MTU", Const, 0}, + {"RTAX_NETMASK", Const, 0}, + {"RTAX_REORDERING", Const, 0}, + {"RTAX_RTO_MIN", Const, 0}, + {"RTAX_RTT", Const, 0}, + {"RTAX_RTTVAR", Const, 0}, + {"RTAX_SRC", Const, 1}, + {"RTAX_SRCMASK", Const, 1}, + {"RTAX_SSTHRESH", Const, 0}, + {"RTAX_TAG", Const, 1}, + {"RTAX_UNSPEC", Const, 0}, + {"RTAX_WINDOW", Const, 0}, + {"RTA_ALIGNTO", Const, 0}, + {"RTA_AUTHOR", Const, 0}, + {"RTA_BRD", Const, 0}, + {"RTA_CACHEINFO", Const, 0}, + {"RTA_DST", Const, 0}, + {"RTA_FLOW", Const, 0}, + {"RTA_GATEWAY", Const, 0}, + {"RTA_GENMASK", Const, 0}, + {"RTA_IFA", Const, 0}, + {"RTA_IFP", Const, 0}, + {"RTA_IIF", Const, 0}, + {"RTA_LABEL", Const, 1}, + {"RTA_MAX", Const, 0}, + {"RTA_METRICS", Const, 0}, + {"RTA_MULTIPATH", Const, 0}, + {"RTA_NETMASK", Const, 0}, + {"RTA_OIF", Const, 0}, + {"RTA_PREFSRC", Const, 0}, + {"RTA_PRIORITY", Const, 0}, + {"RTA_SRC", Const, 0}, + {"RTA_SRCMASK", Const, 1}, + {"RTA_TABLE", Const, 0}, + {"RTA_TAG", Const, 1}, + {"RTA_UNSPEC", Const, 0}, + {"RTCF_DIRECTSRC", Const, 0}, + {"RTCF_DOREDIRECT", Const, 0}, + {"RTCF_LOG", Const, 0}, + {"RTCF_MASQ", Const, 0}, + {"RTCF_NAT", Const, 0}, + {"RTCF_VALVE", Const, 0}, + {"RTF_ADDRCLASSMASK", Const, 0}, + {"RTF_ADDRCONF", Const, 0}, + {"RTF_ALLONLINK", Const, 0}, + {"RTF_ANNOUNCE", Const, 1}, + {"RTF_BLACKHOLE", Const, 0}, + {"RTF_BROADCAST", Const, 0}, + {"RTF_CACHE", Const, 0}, + {"RTF_CLONED", Const, 1}, + {"RTF_CLONING", Const, 0}, + {"RTF_CONDEMNED", Const, 0}, + {"RTF_DEFAULT", Const, 0}, + {"RTF_DELCLONE", Const, 0}, + {"RTF_DONE", Const, 0}, + {"RTF_DYNAMIC", Const, 0}, + {"RTF_FLOW", Const, 0}, + {"RTF_FMASK", Const, 0}, + {"RTF_GATEWAY", Const, 0}, + {"RTF_GWFLAG_COMPAT", Const, 3}, + {"RTF_HOST", Const, 0}, + {"RTF_IFREF", Const, 0}, + {"RTF_IFSCOPE", Const, 0}, + {"RTF_INTERFACE", Const, 0}, + {"RTF_IRTT", 
Const, 0}, + {"RTF_LINKRT", Const, 0}, + {"RTF_LLDATA", Const, 0}, + {"RTF_LLINFO", Const, 0}, + {"RTF_LOCAL", Const, 0}, + {"RTF_MASK", Const, 1}, + {"RTF_MODIFIED", Const, 0}, + {"RTF_MPATH", Const, 1}, + {"RTF_MPLS", Const, 1}, + {"RTF_MSS", Const, 0}, + {"RTF_MTU", Const, 0}, + {"RTF_MULTICAST", Const, 0}, + {"RTF_NAT", Const, 0}, + {"RTF_NOFORWARD", Const, 0}, + {"RTF_NONEXTHOP", Const, 0}, + {"RTF_NOPMTUDISC", Const, 0}, + {"RTF_PERMANENT_ARP", Const, 1}, + {"RTF_PINNED", Const, 0}, + {"RTF_POLICY", Const, 0}, + {"RTF_PRCLONING", Const, 0}, + {"RTF_PROTO1", Const, 0}, + {"RTF_PROTO2", Const, 0}, + {"RTF_PROTO3", Const, 0}, + {"RTF_PROXY", Const, 16}, + {"RTF_REINSTATE", Const, 0}, + {"RTF_REJECT", Const, 0}, + {"RTF_RNH_LOCKED", Const, 0}, + {"RTF_ROUTER", Const, 16}, + {"RTF_SOURCE", Const, 1}, + {"RTF_SRC", Const, 1}, + {"RTF_STATIC", Const, 0}, + {"RTF_STICKY", Const, 0}, + {"RTF_THROW", Const, 0}, + {"RTF_TUNNEL", Const, 1}, + {"RTF_UP", Const, 0}, + {"RTF_USETRAILERS", Const, 1}, + {"RTF_WASCLONED", Const, 0}, + {"RTF_WINDOW", Const, 0}, + {"RTF_XRESOLVE", Const, 0}, + {"RTM_ADD", Const, 0}, + {"RTM_BASE", Const, 0}, + {"RTM_CHANGE", Const, 0}, + {"RTM_CHGADDR", Const, 1}, + {"RTM_DELACTION", Const, 0}, + {"RTM_DELADDR", Const, 0}, + {"RTM_DELADDRLABEL", Const, 0}, + {"RTM_DELETE", Const, 0}, + {"RTM_DELLINK", Const, 0}, + {"RTM_DELMADDR", Const, 0}, + {"RTM_DELNEIGH", Const, 0}, + {"RTM_DELQDISC", Const, 0}, + {"RTM_DELROUTE", Const, 0}, + {"RTM_DELRULE", Const, 0}, + {"RTM_DELTCLASS", Const, 0}, + {"RTM_DELTFILTER", Const, 0}, + {"RTM_DESYNC", Const, 1}, + {"RTM_F_CLONED", Const, 0}, + {"RTM_F_EQUALIZE", Const, 0}, + {"RTM_F_NOTIFY", Const, 0}, + {"RTM_F_PREFIX", Const, 0}, + {"RTM_GET", Const, 0}, + {"RTM_GET2", Const, 0}, + {"RTM_GETACTION", Const, 0}, + {"RTM_GETADDR", Const, 0}, + {"RTM_GETADDRLABEL", Const, 0}, + {"RTM_GETANYCAST", Const, 0}, + {"RTM_GETDCB", Const, 0}, + {"RTM_GETLINK", Const, 0}, + {"RTM_GETMULTICAST", Const, 0}, + {"RTM_GETNEIGH", Const, 0}, + {"RTM_GETNEIGHTBL", Const, 0}, + {"RTM_GETQDISC", Const, 0}, + {"RTM_GETROUTE", Const, 0}, + {"RTM_GETRULE", Const, 0}, + {"RTM_GETTCLASS", Const, 0}, + {"RTM_GETTFILTER", Const, 0}, + {"RTM_IEEE80211", Const, 0}, + {"RTM_IFANNOUNCE", Const, 0}, + {"RTM_IFINFO", Const, 0}, + {"RTM_IFINFO2", Const, 0}, + {"RTM_LLINFO_UPD", Const, 1}, + {"RTM_LOCK", Const, 0}, + {"RTM_LOSING", Const, 0}, + {"RTM_MAX", Const, 0}, + {"RTM_MAXSIZE", Const, 1}, + {"RTM_MISS", Const, 0}, + {"RTM_NEWACTION", Const, 0}, + {"RTM_NEWADDR", Const, 0}, + {"RTM_NEWADDRLABEL", Const, 0}, + {"RTM_NEWLINK", Const, 0}, + {"RTM_NEWMADDR", Const, 0}, + {"RTM_NEWMADDR2", Const, 0}, + {"RTM_NEWNDUSEROPT", Const, 0}, + {"RTM_NEWNEIGH", Const, 0}, + {"RTM_NEWNEIGHTBL", Const, 0}, + {"RTM_NEWPREFIX", Const, 0}, + {"RTM_NEWQDISC", Const, 0}, + {"RTM_NEWROUTE", Const, 0}, + {"RTM_NEWRULE", Const, 0}, + {"RTM_NEWTCLASS", Const, 0}, + {"RTM_NEWTFILTER", Const, 0}, + {"RTM_NR_FAMILIES", Const, 0}, + {"RTM_NR_MSGTYPES", Const, 0}, + {"RTM_OIFINFO", Const, 1}, + {"RTM_OLDADD", Const, 0}, + {"RTM_OLDDEL", Const, 0}, + {"RTM_OOIFINFO", Const, 1}, + {"RTM_REDIRECT", Const, 0}, + {"RTM_RESOLVE", Const, 0}, + {"RTM_RTTUNIT", Const, 0}, + {"RTM_SETDCB", Const, 0}, + {"RTM_SETGATE", Const, 1}, + {"RTM_SETLINK", Const, 0}, + {"RTM_SETNEIGHTBL", Const, 0}, + {"RTM_VERSION", Const, 0}, + {"RTNH_ALIGNTO", Const, 0}, + {"RTNH_F_DEAD", Const, 0}, + {"RTNH_F_ONLINK", Const, 0}, + {"RTNH_F_PERVASIVE", Const, 0}, + {"RTNLGRP_IPV4_IFADDR", Const, 1}, + {"RTNLGRP_IPV4_MROUTE", 
Const, 1}, + {"RTNLGRP_IPV4_ROUTE", Const, 1}, + {"RTNLGRP_IPV4_RULE", Const, 1}, + {"RTNLGRP_IPV6_IFADDR", Const, 1}, + {"RTNLGRP_IPV6_IFINFO", Const, 1}, + {"RTNLGRP_IPV6_MROUTE", Const, 1}, + {"RTNLGRP_IPV6_PREFIX", Const, 1}, + {"RTNLGRP_IPV6_ROUTE", Const, 1}, + {"RTNLGRP_IPV6_RULE", Const, 1}, + {"RTNLGRP_LINK", Const, 1}, + {"RTNLGRP_ND_USEROPT", Const, 1}, + {"RTNLGRP_NEIGH", Const, 1}, + {"RTNLGRP_NONE", Const, 1}, + {"RTNLGRP_NOTIFY", Const, 1}, + {"RTNLGRP_TC", Const, 1}, + {"RTN_ANYCAST", Const, 0}, + {"RTN_BLACKHOLE", Const, 0}, + {"RTN_BROADCAST", Const, 0}, + {"RTN_LOCAL", Const, 0}, + {"RTN_MAX", Const, 0}, + {"RTN_MULTICAST", Const, 0}, + {"RTN_NAT", Const, 0}, + {"RTN_PROHIBIT", Const, 0}, + {"RTN_THROW", Const, 0}, + {"RTN_UNICAST", Const, 0}, + {"RTN_UNREACHABLE", Const, 0}, + {"RTN_UNSPEC", Const, 0}, + {"RTN_XRESOLVE", Const, 0}, + {"RTPROT_BIRD", Const, 0}, + {"RTPROT_BOOT", Const, 0}, + {"RTPROT_DHCP", Const, 0}, + {"RTPROT_DNROUTED", Const, 0}, + {"RTPROT_GATED", Const, 0}, + {"RTPROT_KERNEL", Const, 0}, + {"RTPROT_MRT", Const, 0}, + {"RTPROT_NTK", Const, 0}, + {"RTPROT_RA", Const, 0}, + {"RTPROT_REDIRECT", Const, 0}, + {"RTPROT_STATIC", Const, 0}, + {"RTPROT_UNSPEC", Const, 0}, + {"RTPROT_XORP", Const, 0}, + {"RTPROT_ZEBRA", Const, 0}, + {"RTV_EXPIRE", Const, 0}, + {"RTV_HOPCOUNT", Const, 0}, + {"RTV_MTU", Const, 0}, + {"RTV_RPIPE", Const, 0}, + {"RTV_RTT", Const, 0}, + {"RTV_RTTVAR", Const, 0}, + {"RTV_SPIPE", Const, 0}, + {"RTV_SSTHRESH", Const, 0}, + {"RTV_WEIGHT", Const, 0}, + {"RT_CACHING_CONTEXT", Const, 1}, + {"RT_CLASS_DEFAULT", Const, 0}, + {"RT_CLASS_LOCAL", Const, 0}, + {"RT_CLASS_MAIN", Const, 0}, + {"RT_CLASS_MAX", Const, 0}, + {"RT_CLASS_UNSPEC", Const, 0}, + {"RT_DEFAULT_FIB", Const, 1}, + {"RT_NORTREF", Const, 1}, + {"RT_SCOPE_HOST", Const, 0}, + {"RT_SCOPE_LINK", Const, 0}, + {"RT_SCOPE_NOWHERE", Const, 0}, + {"RT_SCOPE_SITE", Const, 0}, + {"RT_SCOPE_UNIVERSE", Const, 0}, + {"RT_TABLEID_MAX", Const, 1}, + {"RT_TABLE_COMPAT", Const, 0}, + {"RT_TABLE_DEFAULT", Const, 0}, + {"RT_TABLE_LOCAL", Const, 0}, + {"RT_TABLE_MAIN", Const, 0}, + {"RT_TABLE_MAX", Const, 0}, + {"RT_TABLE_UNSPEC", Const, 0}, + {"RUSAGE_CHILDREN", Const, 0}, + {"RUSAGE_SELF", Const, 0}, + {"RUSAGE_THREAD", Const, 0}, + {"Radvisory_t", Type, 0}, + {"Radvisory_t.Count", Field, 0}, + {"Radvisory_t.Offset", Field, 0}, + {"Radvisory_t.Pad_cgo_0", Field, 0}, + {"RawConn", Type, 9}, + {"RawSockaddr", Type, 0}, + {"RawSockaddr.Data", Field, 0}, + {"RawSockaddr.Family", Field, 0}, + {"RawSockaddr.Len", Field, 0}, + {"RawSockaddrAny", Type, 0}, + {"RawSockaddrAny.Addr", Field, 0}, + {"RawSockaddrAny.Pad", Field, 0}, + {"RawSockaddrDatalink", Type, 0}, + {"RawSockaddrDatalink.Alen", Field, 0}, + {"RawSockaddrDatalink.Data", Field, 0}, + {"RawSockaddrDatalink.Family", Field, 0}, + {"RawSockaddrDatalink.Index", Field, 0}, + {"RawSockaddrDatalink.Len", Field, 0}, + {"RawSockaddrDatalink.Nlen", Field, 0}, + {"RawSockaddrDatalink.Pad_cgo_0", Field, 2}, + {"RawSockaddrDatalink.Slen", Field, 0}, + {"RawSockaddrDatalink.Type", Field, 0}, + {"RawSockaddrInet4", Type, 0}, + {"RawSockaddrInet4.Addr", Field, 0}, + {"RawSockaddrInet4.Family", Field, 0}, + {"RawSockaddrInet4.Len", Field, 0}, + {"RawSockaddrInet4.Port", Field, 0}, + {"RawSockaddrInet4.Zero", Field, 0}, + {"RawSockaddrInet6", Type, 0}, + {"RawSockaddrInet6.Addr", Field, 0}, + {"RawSockaddrInet6.Family", Field, 0}, + {"RawSockaddrInet6.Flowinfo", Field, 0}, + {"RawSockaddrInet6.Len", Field, 0}, + {"RawSockaddrInet6.Port", Field, 0}, + 
{"RawSockaddrInet6.Scope_id", Field, 0}, + {"RawSockaddrLinklayer", Type, 0}, + {"RawSockaddrLinklayer.Addr", Field, 0}, + {"RawSockaddrLinklayer.Family", Field, 0}, + {"RawSockaddrLinklayer.Halen", Field, 0}, + {"RawSockaddrLinklayer.Hatype", Field, 0}, + {"RawSockaddrLinklayer.Ifindex", Field, 0}, + {"RawSockaddrLinklayer.Pkttype", Field, 0}, + {"RawSockaddrLinklayer.Protocol", Field, 0}, + {"RawSockaddrNetlink", Type, 0}, + {"RawSockaddrNetlink.Family", Field, 0}, + {"RawSockaddrNetlink.Groups", Field, 0}, + {"RawSockaddrNetlink.Pad", Field, 0}, + {"RawSockaddrNetlink.Pid", Field, 0}, + {"RawSockaddrUnix", Type, 0}, + {"RawSockaddrUnix.Family", Field, 0}, + {"RawSockaddrUnix.Len", Field, 0}, + {"RawSockaddrUnix.Pad_cgo_0", Field, 2}, + {"RawSockaddrUnix.Path", Field, 0}, + {"RawSyscall", Func, 0}, + {"RawSyscall6", Func, 0}, + {"Read", Func, 0}, + {"ReadConsole", Func, 1}, + {"ReadDirectoryChanges", Func, 0}, + {"ReadDirent", Func, 0}, + {"ReadFile", Func, 0}, + {"Readlink", Func, 0}, + {"Reboot", Func, 0}, + {"Recvfrom", Func, 0}, + {"Recvmsg", Func, 0}, + {"RegCloseKey", Func, 0}, + {"RegEnumKeyEx", Func, 0}, + {"RegOpenKeyEx", Func, 0}, + {"RegQueryInfoKey", Func, 0}, + {"RegQueryValueEx", Func, 0}, + {"RemoveDirectory", Func, 0}, + {"Removexattr", Func, 1}, + {"Rename", Func, 0}, + {"Renameat", Func, 0}, + {"Revoke", Func, 0}, + {"Rlimit", Type, 0}, + {"Rlimit.Cur", Field, 0}, + {"Rlimit.Max", Field, 0}, + {"Rmdir", Func, 0}, + {"RouteMessage", Type, 0}, + {"RouteMessage.Data", Field, 0}, + {"RouteMessage.Header", Field, 0}, + {"RouteRIB", Func, 0}, + {"RoutingMessage", Type, 0}, + {"RtAttr", Type, 0}, + {"RtAttr.Len", Field, 0}, + {"RtAttr.Type", Field, 0}, + {"RtGenmsg", Type, 0}, + {"RtGenmsg.Family", Field, 0}, + {"RtMetrics", Type, 0}, + {"RtMetrics.Expire", Field, 0}, + {"RtMetrics.Filler", Field, 0}, + {"RtMetrics.Hopcount", Field, 0}, + {"RtMetrics.Locks", Field, 0}, + {"RtMetrics.Mtu", Field, 0}, + {"RtMetrics.Pad", Field, 3}, + {"RtMetrics.Pksent", Field, 0}, + {"RtMetrics.Recvpipe", Field, 0}, + {"RtMetrics.Refcnt", Field, 2}, + {"RtMetrics.Rtt", Field, 0}, + {"RtMetrics.Rttvar", Field, 0}, + {"RtMetrics.Sendpipe", Field, 0}, + {"RtMetrics.Ssthresh", Field, 0}, + {"RtMetrics.Weight", Field, 0}, + {"RtMsg", Type, 0}, + {"RtMsg.Dst_len", Field, 0}, + {"RtMsg.Family", Field, 0}, + {"RtMsg.Flags", Field, 0}, + {"RtMsg.Protocol", Field, 0}, + {"RtMsg.Scope", Field, 0}, + {"RtMsg.Src_len", Field, 0}, + {"RtMsg.Table", Field, 0}, + {"RtMsg.Tos", Field, 0}, + {"RtMsg.Type", Field, 0}, + {"RtMsghdr", Type, 0}, + {"RtMsghdr.Addrs", Field, 0}, + {"RtMsghdr.Errno", Field, 0}, + {"RtMsghdr.Flags", Field, 0}, + {"RtMsghdr.Fmask", Field, 0}, + {"RtMsghdr.Hdrlen", Field, 2}, + {"RtMsghdr.Index", Field, 0}, + {"RtMsghdr.Inits", Field, 0}, + {"RtMsghdr.Mpls", Field, 2}, + {"RtMsghdr.Msglen", Field, 0}, + {"RtMsghdr.Pad_cgo_0", Field, 0}, + {"RtMsghdr.Pad_cgo_1", Field, 2}, + {"RtMsghdr.Pid", Field, 0}, + {"RtMsghdr.Priority", Field, 2}, + {"RtMsghdr.Rmx", Field, 0}, + {"RtMsghdr.Seq", Field, 0}, + {"RtMsghdr.Tableid", Field, 2}, + {"RtMsghdr.Type", Field, 0}, + {"RtMsghdr.Use", Field, 0}, + {"RtMsghdr.Version", Field, 0}, + {"RtNexthop", Type, 0}, + {"RtNexthop.Flags", Field, 0}, + {"RtNexthop.Hops", Field, 0}, + {"RtNexthop.Ifindex", Field, 0}, + {"RtNexthop.Len", Field, 0}, + {"Rusage", Type, 0}, + {"Rusage.CreationTime", Field, 0}, + {"Rusage.ExitTime", Field, 0}, + {"Rusage.Idrss", Field, 0}, + {"Rusage.Inblock", Field, 0}, + {"Rusage.Isrss", Field, 0}, + {"Rusage.Ixrss", Field, 0}, + 
{"Rusage.KernelTime", Field, 0}, + {"Rusage.Majflt", Field, 0}, + {"Rusage.Maxrss", Field, 0}, + {"Rusage.Minflt", Field, 0}, + {"Rusage.Msgrcv", Field, 0}, + {"Rusage.Msgsnd", Field, 0}, + {"Rusage.Nivcsw", Field, 0}, + {"Rusage.Nsignals", Field, 0}, + {"Rusage.Nswap", Field, 0}, + {"Rusage.Nvcsw", Field, 0}, + {"Rusage.Oublock", Field, 0}, + {"Rusage.Stime", Field, 0}, + {"Rusage.UserTime", Field, 0}, + {"Rusage.Utime", Field, 0}, + {"SCM_BINTIME", Const, 0}, + {"SCM_CREDENTIALS", Const, 0}, + {"SCM_CREDS", Const, 0}, + {"SCM_RIGHTS", Const, 0}, + {"SCM_TIMESTAMP", Const, 0}, + {"SCM_TIMESTAMPING", Const, 0}, + {"SCM_TIMESTAMPNS", Const, 0}, + {"SCM_TIMESTAMP_MONOTONIC", Const, 0}, + {"SHUT_RD", Const, 0}, + {"SHUT_RDWR", Const, 0}, + {"SHUT_WR", Const, 0}, + {"SID", Type, 0}, + {"SIDAndAttributes", Type, 0}, + {"SIDAndAttributes.Attributes", Field, 0}, + {"SIDAndAttributes.Sid", Field, 0}, + {"SIGABRT", Const, 0}, + {"SIGALRM", Const, 0}, + {"SIGBUS", Const, 0}, + {"SIGCHLD", Const, 0}, + {"SIGCLD", Const, 0}, + {"SIGCONT", Const, 0}, + {"SIGEMT", Const, 0}, + {"SIGFPE", Const, 0}, + {"SIGHUP", Const, 0}, + {"SIGILL", Const, 0}, + {"SIGINFO", Const, 0}, + {"SIGINT", Const, 0}, + {"SIGIO", Const, 0}, + {"SIGIOT", Const, 0}, + {"SIGKILL", Const, 0}, + {"SIGLIBRT", Const, 1}, + {"SIGLWP", Const, 0}, + {"SIGPIPE", Const, 0}, + {"SIGPOLL", Const, 0}, + {"SIGPROF", Const, 0}, + {"SIGPWR", Const, 0}, + {"SIGQUIT", Const, 0}, + {"SIGSEGV", Const, 0}, + {"SIGSTKFLT", Const, 0}, + {"SIGSTOP", Const, 0}, + {"SIGSYS", Const, 0}, + {"SIGTERM", Const, 0}, + {"SIGTHR", Const, 0}, + {"SIGTRAP", Const, 0}, + {"SIGTSTP", Const, 0}, + {"SIGTTIN", Const, 0}, + {"SIGTTOU", Const, 0}, + {"SIGUNUSED", Const, 0}, + {"SIGURG", Const, 0}, + {"SIGUSR1", Const, 0}, + {"SIGUSR2", Const, 0}, + {"SIGVTALRM", Const, 0}, + {"SIGWINCH", Const, 0}, + {"SIGXCPU", Const, 0}, + {"SIGXFSZ", Const, 0}, + {"SIOCADDDLCI", Const, 0}, + {"SIOCADDMULTI", Const, 0}, + {"SIOCADDRT", Const, 0}, + {"SIOCAIFADDR", Const, 0}, + {"SIOCAIFGROUP", Const, 0}, + {"SIOCALIFADDR", Const, 0}, + {"SIOCARPIPLL", Const, 0}, + {"SIOCATMARK", Const, 0}, + {"SIOCAUTOADDR", Const, 0}, + {"SIOCAUTONETMASK", Const, 0}, + {"SIOCBRDGADD", Const, 1}, + {"SIOCBRDGADDS", Const, 1}, + {"SIOCBRDGARL", Const, 1}, + {"SIOCBRDGDADDR", Const, 1}, + {"SIOCBRDGDEL", Const, 1}, + {"SIOCBRDGDELS", Const, 1}, + {"SIOCBRDGFLUSH", Const, 1}, + {"SIOCBRDGFRL", Const, 1}, + {"SIOCBRDGGCACHE", Const, 1}, + {"SIOCBRDGGFD", Const, 1}, + {"SIOCBRDGGHT", Const, 1}, + {"SIOCBRDGGIFFLGS", Const, 1}, + {"SIOCBRDGGMA", Const, 1}, + {"SIOCBRDGGPARAM", Const, 1}, + {"SIOCBRDGGPRI", Const, 1}, + {"SIOCBRDGGRL", Const, 1}, + {"SIOCBRDGGSIFS", Const, 1}, + {"SIOCBRDGGTO", Const, 1}, + {"SIOCBRDGIFS", Const, 1}, + {"SIOCBRDGRTS", Const, 1}, + {"SIOCBRDGSADDR", Const, 1}, + {"SIOCBRDGSCACHE", Const, 1}, + {"SIOCBRDGSFD", Const, 1}, + {"SIOCBRDGSHT", Const, 1}, + {"SIOCBRDGSIFCOST", Const, 1}, + {"SIOCBRDGSIFFLGS", Const, 1}, + {"SIOCBRDGSIFPRIO", Const, 1}, + {"SIOCBRDGSMA", Const, 1}, + {"SIOCBRDGSPRI", Const, 1}, + {"SIOCBRDGSPROTO", Const, 1}, + {"SIOCBRDGSTO", Const, 1}, + {"SIOCBRDGSTXHC", Const, 1}, + {"SIOCDARP", Const, 0}, + {"SIOCDELDLCI", Const, 0}, + {"SIOCDELMULTI", Const, 0}, + {"SIOCDELRT", Const, 0}, + {"SIOCDEVPRIVATE", Const, 0}, + {"SIOCDIFADDR", Const, 0}, + {"SIOCDIFGROUP", Const, 0}, + {"SIOCDIFPHYADDR", Const, 0}, + {"SIOCDLIFADDR", Const, 0}, + {"SIOCDRARP", Const, 0}, + {"SIOCGARP", Const, 0}, + {"SIOCGDRVSPEC", Const, 0}, + {"SIOCGETKALIVE", Const, 1}, + 
{"SIOCGETLABEL", Const, 1}, + {"SIOCGETPFLOW", Const, 1}, + {"SIOCGETPFSYNC", Const, 1}, + {"SIOCGETSGCNT", Const, 0}, + {"SIOCGETVIFCNT", Const, 0}, + {"SIOCGETVLAN", Const, 0}, + {"SIOCGHIWAT", Const, 0}, + {"SIOCGIFADDR", Const, 0}, + {"SIOCGIFADDRPREF", Const, 1}, + {"SIOCGIFALIAS", Const, 1}, + {"SIOCGIFALTMTU", Const, 0}, + {"SIOCGIFASYNCMAP", Const, 0}, + {"SIOCGIFBOND", Const, 0}, + {"SIOCGIFBR", Const, 0}, + {"SIOCGIFBRDADDR", Const, 0}, + {"SIOCGIFCAP", Const, 0}, + {"SIOCGIFCONF", Const, 0}, + {"SIOCGIFCOUNT", Const, 0}, + {"SIOCGIFDATA", Const, 1}, + {"SIOCGIFDESCR", Const, 0}, + {"SIOCGIFDEVMTU", Const, 0}, + {"SIOCGIFDLT", Const, 1}, + {"SIOCGIFDSTADDR", Const, 0}, + {"SIOCGIFENCAP", Const, 0}, + {"SIOCGIFFIB", Const, 1}, + {"SIOCGIFFLAGS", Const, 0}, + {"SIOCGIFGATTR", Const, 1}, + {"SIOCGIFGENERIC", Const, 0}, + {"SIOCGIFGMEMB", Const, 0}, + {"SIOCGIFGROUP", Const, 0}, + {"SIOCGIFHARDMTU", Const, 3}, + {"SIOCGIFHWADDR", Const, 0}, + {"SIOCGIFINDEX", Const, 0}, + {"SIOCGIFKPI", Const, 0}, + {"SIOCGIFMAC", Const, 0}, + {"SIOCGIFMAP", Const, 0}, + {"SIOCGIFMEDIA", Const, 0}, + {"SIOCGIFMEM", Const, 0}, + {"SIOCGIFMETRIC", Const, 0}, + {"SIOCGIFMTU", Const, 0}, + {"SIOCGIFNAME", Const, 0}, + {"SIOCGIFNETMASK", Const, 0}, + {"SIOCGIFPDSTADDR", Const, 0}, + {"SIOCGIFPFLAGS", Const, 0}, + {"SIOCGIFPHYS", Const, 0}, + {"SIOCGIFPRIORITY", Const, 1}, + {"SIOCGIFPSRCADDR", Const, 0}, + {"SIOCGIFRDOMAIN", Const, 1}, + {"SIOCGIFRTLABEL", Const, 1}, + {"SIOCGIFSLAVE", Const, 0}, + {"SIOCGIFSTATUS", Const, 0}, + {"SIOCGIFTIMESLOT", Const, 1}, + {"SIOCGIFTXQLEN", Const, 0}, + {"SIOCGIFVLAN", Const, 0}, + {"SIOCGIFWAKEFLAGS", Const, 0}, + {"SIOCGIFXFLAGS", Const, 1}, + {"SIOCGLIFADDR", Const, 0}, + {"SIOCGLIFPHYADDR", Const, 0}, + {"SIOCGLIFPHYRTABLE", Const, 1}, + {"SIOCGLIFPHYTTL", Const, 3}, + {"SIOCGLINKSTR", Const, 1}, + {"SIOCGLOWAT", Const, 0}, + {"SIOCGPGRP", Const, 0}, + {"SIOCGPRIVATE_0", Const, 0}, + {"SIOCGPRIVATE_1", Const, 0}, + {"SIOCGRARP", Const, 0}, + {"SIOCGSPPPPARAMS", Const, 3}, + {"SIOCGSTAMP", Const, 0}, + {"SIOCGSTAMPNS", Const, 0}, + {"SIOCGVH", Const, 1}, + {"SIOCGVNETID", Const, 3}, + {"SIOCIFCREATE", Const, 0}, + {"SIOCIFCREATE2", Const, 0}, + {"SIOCIFDESTROY", Const, 0}, + {"SIOCIFGCLONERS", Const, 0}, + {"SIOCINITIFADDR", Const, 1}, + {"SIOCPROTOPRIVATE", Const, 0}, + {"SIOCRSLVMULTI", Const, 0}, + {"SIOCRTMSG", Const, 0}, + {"SIOCSARP", Const, 0}, + {"SIOCSDRVSPEC", Const, 0}, + {"SIOCSETKALIVE", Const, 1}, + {"SIOCSETLABEL", Const, 1}, + {"SIOCSETPFLOW", Const, 1}, + {"SIOCSETPFSYNC", Const, 1}, + {"SIOCSETVLAN", Const, 0}, + {"SIOCSHIWAT", Const, 0}, + {"SIOCSIFADDR", Const, 0}, + {"SIOCSIFADDRPREF", Const, 1}, + {"SIOCSIFALTMTU", Const, 0}, + {"SIOCSIFASYNCMAP", Const, 0}, + {"SIOCSIFBOND", Const, 0}, + {"SIOCSIFBR", Const, 0}, + {"SIOCSIFBRDADDR", Const, 0}, + {"SIOCSIFCAP", Const, 0}, + {"SIOCSIFDESCR", Const, 0}, + {"SIOCSIFDSTADDR", Const, 0}, + {"SIOCSIFENCAP", Const, 0}, + {"SIOCSIFFIB", Const, 1}, + {"SIOCSIFFLAGS", Const, 0}, + {"SIOCSIFGATTR", Const, 1}, + {"SIOCSIFGENERIC", Const, 0}, + {"SIOCSIFHWADDR", Const, 0}, + {"SIOCSIFHWBROADCAST", Const, 0}, + {"SIOCSIFKPI", Const, 0}, + {"SIOCSIFLINK", Const, 0}, + {"SIOCSIFLLADDR", Const, 0}, + {"SIOCSIFMAC", Const, 0}, + {"SIOCSIFMAP", Const, 0}, + {"SIOCSIFMEDIA", Const, 0}, + {"SIOCSIFMEM", Const, 0}, + {"SIOCSIFMETRIC", Const, 0}, + {"SIOCSIFMTU", Const, 0}, + {"SIOCSIFNAME", Const, 0}, + {"SIOCSIFNETMASK", Const, 0}, + {"SIOCSIFPFLAGS", Const, 0}, + {"SIOCSIFPHYADDR", Const, 0}, + {"SIOCSIFPHYS", 
Const, 0}, + {"SIOCSIFPRIORITY", Const, 1}, + {"SIOCSIFRDOMAIN", Const, 1}, + {"SIOCSIFRTLABEL", Const, 1}, + {"SIOCSIFRVNET", Const, 0}, + {"SIOCSIFSLAVE", Const, 0}, + {"SIOCSIFTIMESLOT", Const, 1}, + {"SIOCSIFTXQLEN", Const, 0}, + {"SIOCSIFVLAN", Const, 0}, + {"SIOCSIFVNET", Const, 0}, + {"SIOCSIFXFLAGS", Const, 1}, + {"SIOCSLIFPHYADDR", Const, 0}, + {"SIOCSLIFPHYRTABLE", Const, 1}, + {"SIOCSLIFPHYTTL", Const, 3}, + {"SIOCSLINKSTR", Const, 1}, + {"SIOCSLOWAT", Const, 0}, + {"SIOCSPGRP", Const, 0}, + {"SIOCSRARP", Const, 0}, + {"SIOCSSPPPPARAMS", Const, 3}, + {"SIOCSVH", Const, 1}, + {"SIOCSVNETID", Const, 3}, + {"SIOCZIFDATA", Const, 1}, + {"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1}, + {"SIO_GET_INTERFACE_LIST", Const, 0}, + {"SIO_KEEPALIVE_VALS", Const, 3}, + {"SIO_UDP_CONNRESET", Const, 4}, + {"SOCK_CLOEXEC", Const, 0}, + {"SOCK_DCCP", Const, 0}, + {"SOCK_DGRAM", Const, 0}, + {"SOCK_FLAGS_MASK", Const, 1}, + {"SOCK_MAXADDRLEN", Const, 0}, + {"SOCK_NONBLOCK", Const, 0}, + {"SOCK_NOSIGPIPE", Const, 1}, + {"SOCK_PACKET", Const, 0}, + {"SOCK_RAW", Const, 0}, + {"SOCK_RDM", Const, 0}, + {"SOCK_SEQPACKET", Const, 0}, + {"SOCK_STREAM", Const, 0}, + {"SOL_AAL", Const, 0}, + {"SOL_ATM", Const, 0}, + {"SOL_DECNET", Const, 0}, + {"SOL_ICMPV6", Const, 0}, + {"SOL_IP", Const, 0}, + {"SOL_IPV6", Const, 0}, + {"SOL_IRDA", Const, 0}, + {"SOL_PACKET", Const, 0}, + {"SOL_RAW", Const, 0}, + {"SOL_SOCKET", Const, 0}, + {"SOL_TCP", Const, 0}, + {"SOL_X25", Const, 0}, + {"SOMAXCONN", Const, 0}, + {"SO_ACCEPTCONN", Const, 0}, + {"SO_ACCEPTFILTER", Const, 0}, + {"SO_ATTACH_FILTER", Const, 0}, + {"SO_BINDANY", Const, 1}, + {"SO_BINDTODEVICE", Const, 0}, + {"SO_BINTIME", Const, 0}, + {"SO_BROADCAST", Const, 0}, + {"SO_BSDCOMPAT", Const, 0}, + {"SO_DEBUG", Const, 0}, + {"SO_DETACH_FILTER", Const, 0}, + {"SO_DOMAIN", Const, 0}, + {"SO_DONTROUTE", Const, 0}, + {"SO_DONTTRUNC", Const, 0}, + {"SO_ERROR", Const, 0}, + {"SO_KEEPALIVE", Const, 0}, + {"SO_LABEL", Const, 0}, + {"SO_LINGER", Const, 0}, + {"SO_LINGER_SEC", Const, 0}, + {"SO_LISTENINCQLEN", Const, 0}, + {"SO_LISTENQLEN", Const, 0}, + {"SO_LISTENQLIMIT", Const, 0}, + {"SO_MARK", Const, 0}, + {"SO_NETPROC", Const, 1}, + {"SO_NKE", Const, 0}, + {"SO_NOADDRERR", Const, 0}, + {"SO_NOHEADER", Const, 1}, + {"SO_NOSIGPIPE", Const, 0}, + {"SO_NOTIFYCONFLICT", Const, 0}, + {"SO_NO_CHECK", Const, 0}, + {"SO_NO_DDP", Const, 0}, + {"SO_NO_OFFLOAD", Const, 0}, + {"SO_NP_EXTENSIONS", Const, 0}, + {"SO_NREAD", Const, 0}, + {"SO_NUMRCVPKT", Const, 16}, + {"SO_NWRITE", Const, 0}, + {"SO_OOBINLINE", Const, 0}, + {"SO_OVERFLOWED", Const, 1}, + {"SO_PASSCRED", Const, 0}, + {"SO_PASSSEC", Const, 0}, + {"SO_PEERCRED", Const, 0}, + {"SO_PEERLABEL", Const, 0}, + {"SO_PEERNAME", Const, 0}, + {"SO_PEERSEC", Const, 0}, + {"SO_PRIORITY", Const, 0}, + {"SO_PROTOCOL", Const, 0}, + {"SO_PROTOTYPE", Const, 1}, + {"SO_RANDOMPORT", Const, 0}, + {"SO_RCVBUF", Const, 0}, + {"SO_RCVBUFFORCE", Const, 0}, + {"SO_RCVLOWAT", Const, 0}, + {"SO_RCVTIMEO", Const, 0}, + {"SO_RESTRICTIONS", Const, 0}, + {"SO_RESTRICT_DENYIN", Const, 0}, + {"SO_RESTRICT_DENYOUT", Const, 0}, + {"SO_RESTRICT_DENYSET", Const, 0}, + {"SO_REUSEADDR", Const, 0}, + {"SO_REUSEPORT", Const, 0}, + {"SO_REUSESHAREUID", Const, 0}, + {"SO_RTABLE", Const, 1}, + {"SO_RXQ_OVFL", Const, 0}, + {"SO_SECURITY_AUTHENTICATION", Const, 0}, + {"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0}, + {"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0}, + {"SO_SETFIB", Const, 0}, + {"SO_SNDBUF", Const, 0}, + {"SO_SNDBUFFORCE", Const, 0}, + {"SO_SNDLOWAT", 
Const, 0}, + {"SO_SNDTIMEO", Const, 0}, + {"SO_SPLICE", Const, 1}, + {"SO_TIMESTAMP", Const, 0}, + {"SO_TIMESTAMPING", Const, 0}, + {"SO_TIMESTAMPNS", Const, 0}, + {"SO_TIMESTAMP_MONOTONIC", Const, 0}, + {"SO_TYPE", Const, 0}, + {"SO_UPCALLCLOSEWAIT", Const, 0}, + {"SO_UPDATE_ACCEPT_CONTEXT", Const, 0}, + {"SO_UPDATE_CONNECT_CONTEXT", Const, 1}, + {"SO_USELOOPBACK", Const, 0}, + {"SO_USER_COOKIE", Const, 1}, + {"SO_VENDOR", Const, 3}, + {"SO_WANTMORE", Const, 0}, + {"SO_WANTOOBFLAG", Const, 0}, + {"SSLExtraCertChainPolicyPara", Type, 0}, + {"SSLExtraCertChainPolicyPara.AuthType", Field, 0}, + {"SSLExtraCertChainPolicyPara.Checks", Field, 0}, + {"SSLExtraCertChainPolicyPara.ServerName", Field, 0}, + {"SSLExtraCertChainPolicyPara.Size", Field, 0}, + {"STANDARD_RIGHTS_ALL", Const, 0}, + {"STANDARD_RIGHTS_EXECUTE", Const, 0}, + {"STANDARD_RIGHTS_READ", Const, 0}, + {"STANDARD_RIGHTS_REQUIRED", Const, 0}, + {"STANDARD_RIGHTS_WRITE", Const, 0}, + {"STARTF_USESHOWWINDOW", Const, 0}, + {"STARTF_USESTDHANDLES", Const, 0}, + {"STD_ERROR_HANDLE", Const, 0}, + {"STD_INPUT_HANDLE", Const, 0}, + {"STD_OUTPUT_HANDLE", Const, 0}, + {"SUBLANG_ENGLISH_US", Const, 0}, + {"SW_FORCEMINIMIZE", Const, 0}, + {"SW_HIDE", Const, 0}, + {"SW_MAXIMIZE", Const, 0}, + {"SW_MINIMIZE", Const, 0}, + {"SW_NORMAL", Const, 0}, + {"SW_RESTORE", Const, 0}, + {"SW_SHOW", Const, 0}, + {"SW_SHOWDEFAULT", Const, 0}, + {"SW_SHOWMAXIMIZED", Const, 0}, + {"SW_SHOWMINIMIZED", Const, 0}, + {"SW_SHOWMINNOACTIVE", Const, 0}, + {"SW_SHOWNA", Const, 0}, + {"SW_SHOWNOACTIVATE", Const, 0}, + {"SW_SHOWNORMAL", Const, 0}, + {"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4}, + {"SYNCHRONIZE", Const, 0}, + {"SYSCTL_VERSION", Const, 1}, + {"SYSCTL_VERS_0", Const, 1}, + {"SYSCTL_VERS_1", Const, 1}, + {"SYSCTL_VERS_MASK", Const, 1}, + {"SYS_ABORT2", Const, 0}, + {"SYS_ACCEPT", Const, 0}, + {"SYS_ACCEPT4", Const, 0}, + {"SYS_ACCEPT_NOCANCEL", Const, 0}, + {"SYS_ACCESS", Const, 0}, + {"SYS_ACCESS_EXTENDED", Const, 0}, + {"SYS_ACCT", Const, 0}, + {"SYS_ADD_KEY", Const, 0}, + {"SYS_ADD_PROFIL", Const, 0}, + {"SYS_ADJFREQ", Const, 1}, + {"SYS_ADJTIME", Const, 0}, + {"SYS_ADJTIMEX", Const, 0}, + {"SYS_AFS_SYSCALL", Const, 0}, + {"SYS_AIO_CANCEL", Const, 0}, + {"SYS_AIO_ERROR", Const, 0}, + {"SYS_AIO_FSYNC", Const, 0}, + {"SYS_AIO_MLOCK", Const, 14}, + {"SYS_AIO_READ", Const, 0}, + {"SYS_AIO_RETURN", Const, 0}, + {"SYS_AIO_SUSPEND", Const, 0}, + {"SYS_AIO_SUSPEND_NOCANCEL", Const, 0}, + {"SYS_AIO_WAITCOMPLETE", Const, 14}, + {"SYS_AIO_WRITE", Const, 0}, + {"SYS_ALARM", Const, 0}, + {"SYS_ARCH_PRCTL", Const, 0}, + {"SYS_ARM_FADVISE64_64", Const, 0}, + {"SYS_ARM_SYNC_FILE_RANGE", Const, 0}, + {"SYS_ATGETMSG", Const, 0}, + {"SYS_ATPGETREQ", Const, 0}, + {"SYS_ATPGETRSP", Const, 0}, + {"SYS_ATPSNDREQ", Const, 0}, + {"SYS_ATPSNDRSP", Const, 0}, + {"SYS_ATPUTMSG", Const, 0}, + {"SYS_ATSOCKET", Const, 0}, + {"SYS_AUDIT", Const, 0}, + {"SYS_AUDITCTL", Const, 0}, + {"SYS_AUDITON", Const, 0}, + {"SYS_AUDIT_SESSION_JOIN", Const, 0}, + {"SYS_AUDIT_SESSION_PORT", Const, 0}, + {"SYS_AUDIT_SESSION_SELF", Const, 0}, + {"SYS_BDFLUSH", Const, 0}, + {"SYS_BIND", Const, 0}, + {"SYS_BINDAT", Const, 3}, + {"SYS_BREAK", Const, 0}, + {"SYS_BRK", Const, 0}, + {"SYS_BSDTHREAD_CREATE", Const, 0}, + {"SYS_BSDTHREAD_REGISTER", Const, 0}, + {"SYS_BSDTHREAD_TERMINATE", Const, 0}, + {"SYS_CAPGET", Const, 0}, + {"SYS_CAPSET", Const, 0}, + {"SYS_CAP_ENTER", Const, 0}, + {"SYS_CAP_FCNTLS_GET", Const, 1}, + {"SYS_CAP_FCNTLS_LIMIT", Const, 1}, + {"SYS_CAP_GETMODE", Const, 0}, + {"SYS_CAP_GETRIGHTS", 
Const, 0}, + {"SYS_CAP_IOCTLS_GET", Const, 1}, + {"SYS_CAP_IOCTLS_LIMIT", Const, 1}, + {"SYS_CAP_NEW", Const, 0}, + {"SYS_CAP_RIGHTS_GET", Const, 1}, + {"SYS_CAP_RIGHTS_LIMIT", Const, 1}, + {"SYS_CHDIR", Const, 0}, + {"SYS_CHFLAGS", Const, 0}, + {"SYS_CHFLAGSAT", Const, 3}, + {"SYS_CHMOD", Const, 0}, + {"SYS_CHMOD_EXTENDED", Const, 0}, + {"SYS_CHOWN", Const, 0}, + {"SYS_CHOWN32", Const, 0}, + {"SYS_CHROOT", Const, 0}, + {"SYS_CHUD", Const, 0}, + {"SYS_CLOCK_ADJTIME", Const, 0}, + {"SYS_CLOCK_GETCPUCLOCKID2", Const, 1}, + {"SYS_CLOCK_GETRES", Const, 0}, + {"SYS_CLOCK_GETTIME", Const, 0}, + {"SYS_CLOCK_NANOSLEEP", Const, 0}, + {"SYS_CLOCK_SETTIME", Const, 0}, + {"SYS_CLONE", Const, 0}, + {"SYS_CLOSE", Const, 0}, + {"SYS_CLOSEFROM", Const, 0}, + {"SYS_CLOSE_NOCANCEL", Const, 0}, + {"SYS_CONNECT", Const, 0}, + {"SYS_CONNECTAT", Const, 3}, + {"SYS_CONNECT_NOCANCEL", Const, 0}, + {"SYS_COPYFILE", Const, 0}, + {"SYS_CPUSET", Const, 0}, + {"SYS_CPUSET_GETAFFINITY", Const, 0}, + {"SYS_CPUSET_GETID", Const, 0}, + {"SYS_CPUSET_SETAFFINITY", Const, 0}, + {"SYS_CPUSET_SETID", Const, 0}, + {"SYS_CREAT", Const, 0}, + {"SYS_CREATE_MODULE", Const, 0}, + {"SYS_CSOPS", Const, 0}, + {"SYS_CSOPS_AUDITTOKEN", Const, 16}, + {"SYS_DELETE", Const, 0}, + {"SYS_DELETE_MODULE", Const, 0}, + {"SYS_DUP", Const, 0}, + {"SYS_DUP2", Const, 0}, + {"SYS_DUP3", Const, 0}, + {"SYS_EACCESS", Const, 0}, + {"SYS_EPOLL_CREATE", Const, 0}, + {"SYS_EPOLL_CREATE1", Const, 0}, + {"SYS_EPOLL_CTL", Const, 0}, + {"SYS_EPOLL_CTL_OLD", Const, 0}, + {"SYS_EPOLL_PWAIT", Const, 0}, + {"SYS_EPOLL_WAIT", Const, 0}, + {"SYS_EPOLL_WAIT_OLD", Const, 0}, + {"SYS_EVENTFD", Const, 0}, + {"SYS_EVENTFD2", Const, 0}, + {"SYS_EXCHANGEDATA", Const, 0}, + {"SYS_EXECVE", Const, 0}, + {"SYS_EXIT", Const, 0}, + {"SYS_EXIT_GROUP", Const, 0}, + {"SYS_EXTATTRCTL", Const, 0}, + {"SYS_EXTATTR_DELETE_FD", Const, 0}, + {"SYS_EXTATTR_DELETE_FILE", Const, 0}, + {"SYS_EXTATTR_DELETE_LINK", Const, 0}, + {"SYS_EXTATTR_GET_FD", Const, 0}, + {"SYS_EXTATTR_GET_FILE", Const, 0}, + {"SYS_EXTATTR_GET_LINK", Const, 0}, + {"SYS_EXTATTR_LIST_FD", Const, 0}, + {"SYS_EXTATTR_LIST_FILE", Const, 0}, + {"SYS_EXTATTR_LIST_LINK", Const, 0}, + {"SYS_EXTATTR_SET_FD", Const, 0}, + {"SYS_EXTATTR_SET_FILE", Const, 0}, + {"SYS_EXTATTR_SET_LINK", Const, 0}, + {"SYS_FACCESSAT", Const, 0}, + {"SYS_FADVISE64", Const, 0}, + {"SYS_FADVISE64_64", Const, 0}, + {"SYS_FALLOCATE", Const, 0}, + {"SYS_FANOTIFY_INIT", Const, 0}, + {"SYS_FANOTIFY_MARK", Const, 0}, + {"SYS_FCHDIR", Const, 0}, + {"SYS_FCHFLAGS", Const, 0}, + {"SYS_FCHMOD", Const, 0}, + {"SYS_FCHMODAT", Const, 0}, + {"SYS_FCHMOD_EXTENDED", Const, 0}, + {"SYS_FCHOWN", Const, 0}, + {"SYS_FCHOWN32", Const, 0}, + {"SYS_FCHOWNAT", Const, 0}, + {"SYS_FCHROOT", Const, 1}, + {"SYS_FCNTL", Const, 0}, + {"SYS_FCNTL64", Const, 0}, + {"SYS_FCNTL_NOCANCEL", Const, 0}, + {"SYS_FDATASYNC", Const, 0}, + {"SYS_FEXECVE", Const, 0}, + {"SYS_FFCLOCK_GETCOUNTER", Const, 0}, + {"SYS_FFCLOCK_GETESTIMATE", Const, 0}, + {"SYS_FFCLOCK_SETESTIMATE", Const, 0}, + {"SYS_FFSCTL", Const, 0}, + {"SYS_FGETATTRLIST", Const, 0}, + {"SYS_FGETXATTR", Const, 0}, + {"SYS_FHOPEN", Const, 0}, + {"SYS_FHSTAT", Const, 0}, + {"SYS_FHSTATFS", Const, 0}, + {"SYS_FILEPORT_MAKEFD", Const, 0}, + {"SYS_FILEPORT_MAKEPORT", Const, 0}, + {"SYS_FKTRACE", Const, 1}, + {"SYS_FLISTXATTR", Const, 0}, + {"SYS_FLOCK", Const, 0}, + {"SYS_FORK", Const, 0}, + {"SYS_FPATHCONF", Const, 0}, + {"SYS_FREEBSD6_FTRUNCATE", Const, 0}, + {"SYS_FREEBSD6_LSEEK", Const, 0}, + {"SYS_FREEBSD6_MMAP", Const, 0}, + 
{"SYS_FREEBSD6_PREAD", Const, 0}, + {"SYS_FREEBSD6_PWRITE", Const, 0}, + {"SYS_FREEBSD6_TRUNCATE", Const, 0}, + {"SYS_FREMOVEXATTR", Const, 0}, + {"SYS_FSCTL", Const, 0}, + {"SYS_FSETATTRLIST", Const, 0}, + {"SYS_FSETXATTR", Const, 0}, + {"SYS_FSGETPATH", Const, 0}, + {"SYS_FSTAT", Const, 0}, + {"SYS_FSTAT64", Const, 0}, + {"SYS_FSTAT64_EXTENDED", Const, 0}, + {"SYS_FSTATAT", Const, 0}, + {"SYS_FSTATAT64", Const, 0}, + {"SYS_FSTATFS", Const, 0}, + {"SYS_FSTATFS64", Const, 0}, + {"SYS_FSTATV", Const, 0}, + {"SYS_FSTATVFS1", Const, 1}, + {"SYS_FSTAT_EXTENDED", Const, 0}, + {"SYS_FSYNC", Const, 0}, + {"SYS_FSYNC_NOCANCEL", Const, 0}, + {"SYS_FSYNC_RANGE", Const, 1}, + {"SYS_FTIME", Const, 0}, + {"SYS_FTRUNCATE", Const, 0}, + {"SYS_FTRUNCATE64", Const, 0}, + {"SYS_FUTEX", Const, 0}, + {"SYS_FUTIMENS", Const, 1}, + {"SYS_FUTIMES", Const, 0}, + {"SYS_FUTIMESAT", Const, 0}, + {"SYS_GETATTRLIST", Const, 0}, + {"SYS_GETAUDIT", Const, 0}, + {"SYS_GETAUDIT_ADDR", Const, 0}, + {"SYS_GETAUID", Const, 0}, + {"SYS_GETCONTEXT", Const, 0}, + {"SYS_GETCPU", Const, 0}, + {"SYS_GETCWD", Const, 0}, + {"SYS_GETDENTS", Const, 0}, + {"SYS_GETDENTS64", Const, 0}, + {"SYS_GETDIRENTRIES", Const, 0}, + {"SYS_GETDIRENTRIES64", Const, 0}, + {"SYS_GETDIRENTRIESATTR", Const, 0}, + {"SYS_GETDTABLECOUNT", Const, 1}, + {"SYS_GETDTABLESIZE", Const, 0}, + {"SYS_GETEGID", Const, 0}, + {"SYS_GETEGID32", Const, 0}, + {"SYS_GETEUID", Const, 0}, + {"SYS_GETEUID32", Const, 0}, + {"SYS_GETFH", Const, 0}, + {"SYS_GETFSSTAT", Const, 0}, + {"SYS_GETFSSTAT64", Const, 0}, + {"SYS_GETGID", Const, 0}, + {"SYS_GETGID32", Const, 0}, + {"SYS_GETGROUPS", Const, 0}, + {"SYS_GETGROUPS32", Const, 0}, + {"SYS_GETHOSTUUID", Const, 0}, + {"SYS_GETITIMER", Const, 0}, + {"SYS_GETLCID", Const, 0}, + {"SYS_GETLOGIN", Const, 0}, + {"SYS_GETLOGINCLASS", Const, 0}, + {"SYS_GETPEERNAME", Const, 0}, + {"SYS_GETPGID", Const, 0}, + {"SYS_GETPGRP", Const, 0}, + {"SYS_GETPID", Const, 0}, + {"SYS_GETPMSG", Const, 0}, + {"SYS_GETPPID", Const, 0}, + {"SYS_GETPRIORITY", Const, 0}, + {"SYS_GETRESGID", Const, 0}, + {"SYS_GETRESGID32", Const, 0}, + {"SYS_GETRESUID", Const, 0}, + {"SYS_GETRESUID32", Const, 0}, + {"SYS_GETRLIMIT", Const, 0}, + {"SYS_GETRTABLE", Const, 1}, + {"SYS_GETRUSAGE", Const, 0}, + {"SYS_GETSGROUPS", Const, 0}, + {"SYS_GETSID", Const, 0}, + {"SYS_GETSOCKNAME", Const, 0}, + {"SYS_GETSOCKOPT", Const, 0}, + {"SYS_GETTHRID", Const, 1}, + {"SYS_GETTID", Const, 0}, + {"SYS_GETTIMEOFDAY", Const, 0}, + {"SYS_GETUID", Const, 0}, + {"SYS_GETUID32", Const, 0}, + {"SYS_GETVFSSTAT", Const, 1}, + {"SYS_GETWGROUPS", Const, 0}, + {"SYS_GETXATTR", Const, 0}, + {"SYS_GET_KERNEL_SYMS", Const, 0}, + {"SYS_GET_MEMPOLICY", Const, 0}, + {"SYS_GET_ROBUST_LIST", Const, 0}, + {"SYS_GET_THREAD_AREA", Const, 0}, + {"SYS_GSSD_SYSCALL", Const, 14}, + {"SYS_GTTY", Const, 0}, + {"SYS_IDENTITYSVC", Const, 0}, + {"SYS_IDLE", Const, 0}, + {"SYS_INITGROUPS", Const, 0}, + {"SYS_INIT_MODULE", Const, 0}, + {"SYS_INOTIFY_ADD_WATCH", Const, 0}, + {"SYS_INOTIFY_INIT", Const, 0}, + {"SYS_INOTIFY_INIT1", Const, 0}, + {"SYS_INOTIFY_RM_WATCH", Const, 0}, + {"SYS_IOCTL", Const, 0}, + {"SYS_IOPERM", Const, 0}, + {"SYS_IOPL", Const, 0}, + {"SYS_IOPOLICYSYS", Const, 0}, + {"SYS_IOPRIO_GET", Const, 0}, + {"SYS_IOPRIO_SET", Const, 0}, + {"SYS_IO_CANCEL", Const, 0}, + {"SYS_IO_DESTROY", Const, 0}, + {"SYS_IO_GETEVENTS", Const, 0}, + {"SYS_IO_SETUP", Const, 0}, + {"SYS_IO_SUBMIT", Const, 0}, + {"SYS_IPC", Const, 0}, + {"SYS_ISSETUGID", Const, 0}, + {"SYS_JAIL", Const, 0}, + {"SYS_JAIL_ATTACH", Const, 
0}, + {"SYS_JAIL_GET", Const, 0}, + {"SYS_JAIL_REMOVE", Const, 0}, + {"SYS_JAIL_SET", Const, 0}, + {"SYS_KAS_INFO", Const, 16}, + {"SYS_KDEBUG_TRACE", Const, 0}, + {"SYS_KENV", Const, 0}, + {"SYS_KEVENT", Const, 0}, + {"SYS_KEVENT64", Const, 0}, + {"SYS_KEXEC_LOAD", Const, 0}, + {"SYS_KEYCTL", Const, 0}, + {"SYS_KILL", Const, 0}, + {"SYS_KLDFIND", Const, 0}, + {"SYS_KLDFIRSTMOD", Const, 0}, + {"SYS_KLDLOAD", Const, 0}, + {"SYS_KLDNEXT", Const, 0}, + {"SYS_KLDSTAT", Const, 0}, + {"SYS_KLDSYM", Const, 0}, + {"SYS_KLDUNLOAD", Const, 0}, + {"SYS_KLDUNLOADF", Const, 0}, + {"SYS_KMQ_NOTIFY", Const, 14}, + {"SYS_KMQ_OPEN", Const, 14}, + {"SYS_KMQ_SETATTR", Const, 14}, + {"SYS_KMQ_TIMEDRECEIVE", Const, 14}, + {"SYS_KMQ_TIMEDSEND", Const, 14}, + {"SYS_KMQ_UNLINK", Const, 14}, + {"SYS_KQUEUE", Const, 0}, + {"SYS_KQUEUE1", Const, 1}, + {"SYS_KSEM_CLOSE", Const, 14}, + {"SYS_KSEM_DESTROY", Const, 14}, + {"SYS_KSEM_GETVALUE", Const, 14}, + {"SYS_KSEM_INIT", Const, 14}, + {"SYS_KSEM_OPEN", Const, 14}, + {"SYS_KSEM_POST", Const, 14}, + {"SYS_KSEM_TIMEDWAIT", Const, 14}, + {"SYS_KSEM_TRYWAIT", Const, 14}, + {"SYS_KSEM_UNLINK", Const, 14}, + {"SYS_KSEM_WAIT", Const, 14}, + {"SYS_KTIMER_CREATE", Const, 0}, + {"SYS_KTIMER_DELETE", Const, 0}, + {"SYS_KTIMER_GETOVERRUN", Const, 0}, + {"SYS_KTIMER_GETTIME", Const, 0}, + {"SYS_KTIMER_SETTIME", Const, 0}, + {"SYS_KTRACE", Const, 0}, + {"SYS_LCHFLAGS", Const, 0}, + {"SYS_LCHMOD", Const, 0}, + {"SYS_LCHOWN", Const, 0}, + {"SYS_LCHOWN32", Const, 0}, + {"SYS_LEDGER", Const, 16}, + {"SYS_LGETFH", Const, 0}, + {"SYS_LGETXATTR", Const, 0}, + {"SYS_LINK", Const, 0}, + {"SYS_LINKAT", Const, 0}, + {"SYS_LIO_LISTIO", Const, 0}, + {"SYS_LISTEN", Const, 0}, + {"SYS_LISTXATTR", Const, 0}, + {"SYS_LLISTXATTR", Const, 0}, + {"SYS_LOCK", Const, 0}, + {"SYS_LOOKUP_DCOOKIE", Const, 0}, + {"SYS_LPATHCONF", Const, 0}, + {"SYS_LREMOVEXATTR", Const, 0}, + {"SYS_LSEEK", Const, 0}, + {"SYS_LSETXATTR", Const, 0}, + {"SYS_LSTAT", Const, 0}, + {"SYS_LSTAT64", Const, 0}, + {"SYS_LSTAT64_EXTENDED", Const, 0}, + {"SYS_LSTATV", Const, 0}, + {"SYS_LSTAT_EXTENDED", Const, 0}, + {"SYS_LUTIMES", Const, 0}, + {"SYS_MAC_SYSCALL", Const, 0}, + {"SYS_MADVISE", Const, 0}, + {"SYS_MADVISE1", Const, 0}, + {"SYS_MAXSYSCALL", Const, 0}, + {"SYS_MBIND", Const, 0}, + {"SYS_MIGRATE_PAGES", Const, 0}, + {"SYS_MINCORE", Const, 0}, + {"SYS_MINHERIT", Const, 0}, + {"SYS_MKCOMPLEX", Const, 0}, + {"SYS_MKDIR", Const, 0}, + {"SYS_MKDIRAT", Const, 0}, + {"SYS_MKDIR_EXTENDED", Const, 0}, + {"SYS_MKFIFO", Const, 0}, + {"SYS_MKFIFOAT", Const, 0}, + {"SYS_MKFIFO_EXTENDED", Const, 0}, + {"SYS_MKNOD", Const, 0}, + {"SYS_MKNODAT", Const, 0}, + {"SYS_MLOCK", Const, 0}, + {"SYS_MLOCKALL", Const, 0}, + {"SYS_MMAP", Const, 0}, + {"SYS_MMAP2", Const, 0}, + {"SYS_MODCTL", Const, 1}, + {"SYS_MODFIND", Const, 0}, + {"SYS_MODFNEXT", Const, 0}, + {"SYS_MODIFY_LDT", Const, 0}, + {"SYS_MODNEXT", Const, 0}, + {"SYS_MODSTAT", Const, 0}, + {"SYS_MODWATCH", Const, 0}, + {"SYS_MOUNT", Const, 0}, + {"SYS_MOVE_PAGES", Const, 0}, + {"SYS_MPROTECT", Const, 0}, + {"SYS_MPX", Const, 0}, + {"SYS_MQUERY", Const, 1}, + {"SYS_MQ_GETSETATTR", Const, 0}, + {"SYS_MQ_NOTIFY", Const, 0}, + {"SYS_MQ_OPEN", Const, 0}, + {"SYS_MQ_TIMEDRECEIVE", Const, 0}, + {"SYS_MQ_TIMEDSEND", Const, 0}, + {"SYS_MQ_UNLINK", Const, 0}, + {"SYS_MREMAP", Const, 0}, + {"SYS_MSGCTL", Const, 0}, + {"SYS_MSGGET", Const, 0}, + {"SYS_MSGRCV", Const, 0}, + {"SYS_MSGRCV_NOCANCEL", Const, 0}, + {"SYS_MSGSND", Const, 0}, + {"SYS_MSGSND_NOCANCEL", Const, 0}, + {"SYS_MSGSYS", Const, 0}, + 
{"SYS_MSYNC", Const, 0}, + {"SYS_MSYNC_NOCANCEL", Const, 0}, + {"SYS_MUNLOCK", Const, 0}, + {"SYS_MUNLOCKALL", Const, 0}, + {"SYS_MUNMAP", Const, 0}, + {"SYS_NAME_TO_HANDLE_AT", Const, 0}, + {"SYS_NANOSLEEP", Const, 0}, + {"SYS_NEWFSTATAT", Const, 0}, + {"SYS_NFSCLNT", Const, 0}, + {"SYS_NFSSERVCTL", Const, 0}, + {"SYS_NFSSVC", Const, 0}, + {"SYS_NFSTAT", Const, 0}, + {"SYS_NICE", Const, 0}, + {"SYS_NLM_SYSCALL", Const, 14}, + {"SYS_NLSTAT", Const, 0}, + {"SYS_NMOUNT", Const, 0}, + {"SYS_NSTAT", Const, 0}, + {"SYS_NTP_ADJTIME", Const, 0}, + {"SYS_NTP_GETTIME", Const, 0}, + {"SYS_NUMA_GETAFFINITY", Const, 14}, + {"SYS_NUMA_SETAFFINITY", Const, 14}, + {"SYS_OABI_SYSCALL_BASE", Const, 0}, + {"SYS_OBREAK", Const, 0}, + {"SYS_OLDFSTAT", Const, 0}, + {"SYS_OLDLSTAT", Const, 0}, + {"SYS_OLDOLDUNAME", Const, 0}, + {"SYS_OLDSTAT", Const, 0}, + {"SYS_OLDUNAME", Const, 0}, + {"SYS_OPEN", Const, 0}, + {"SYS_OPENAT", Const, 0}, + {"SYS_OPENBSD_POLL", Const, 0}, + {"SYS_OPEN_BY_HANDLE_AT", Const, 0}, + {"SYS_OPEN_DPROTECTED_NP", Const, 16}, + {"SYS_OPEN_EXTENDED", Const, 0}, + {"SYS_OPEN_NOCANCEL", Const, 0}, + {"SYS_OVADVISE", Const, 0}, + {"SYS_PACCEPT", Const, 1}, + {"SYS_PATHCONF", Const, 0}, + {"SYS_PAUSE", Const, 0}, + {"SYS_PCICONFIG_IOBASE", Const, 0}, + {"SYS_PCICONFIG_READ", Const, 0}, + {"SYS_PCICONFIG_WRITE", Const, 0}, + {"SYS_PDFORK", Const, 0}, + {"SYS_PDGETPID", Const, 0}, + {"SYS_PDKILL", Const, 0}, + {"SYS_PERF_EVENT_OPEN", Const, 0}, + {"SYS_PERSONALITY", Const, 0}, + {"SYS_PID_HIBERNATE", Const, 0}, + {"SYS_PID_RESUME", Const, 0}, + {"SYS_PID_SHUTDOWN_SOCKETS", Const, 0}, + {"SYS_PID_SUSPEND", Const, 0}, + {"SYS_PIPE", Const, 0}, + {"SYS_PIPE2", Const, 0}, + {"SYS_PIVOT_ROOT", Const, 0}, + {"SYS_PMC_CONTROL", Const, 1}, + {"SYS_PMC_GET_INFO", Const, 1}, + {"SYS_POLL", Const, 0}, + {"SYS_POLLTS", Const, 1}, + {"SYS_POLL_NOCANCEL", Const, 0}, + {"SYS_POSIX_FADVISE", Const, 0}, + {"SYS_POSIX_FALLOCATE", Const, 0}, + {"SYS_POSIX_OPENPT", Const, 0}, + {"SYS_POSIX_SPAWN", Const, 0}, + {"SYS_PPOLL", Const, 0}, + {"SYS_PRCTL", Const, 0}, + {"SYS_PREAD", Const, 0}, + {"SYS_PREAD64", Const, 0}, + {"SYS_PREADV", Const, 0}, + {"SYS_PREAD_NOCANCEL", Const, 0}, + {"SYS_PRLIMIT64", Const, 0}, + {"SYS_PROCCTL", Const, 3}, + {"SYS_PROCESS_POLICY", Const, 0}, + {"SYS_PROCESS_VM_READV", Const, 0}, + {"SYS_PROCESS_VM_WRITEV", Const, 0}, + {"SYS_PROC_INFO", Const, 0}, + {"SYS_PROF", Const, 0}, + {"SYS_PROFIL", Const, 0}, + {"SYS_PSELECT", Const, 0}, + {"SYS_PSELECT6", Const, 0}, + {"SYS_PSET_ASSIGN", Const, 1}, + {"SYS_PSET_CREATE", Const, 1}, + {"SYS_PSET_DESTROY", Const, 1}, + {"SYS_PSYNCH_CVBROAD", Const, 0}, + {"SYS_PSYNCH_CVCLRPREPOST", Const, 0}, + {"SYS_PSYNCH_CVSIGNAL", Const, 0}, + {"SYS_PSYNCH_CVWAIT", Const, 0}, + {"SYS_PSYNCH_MUTEXDROP", Const, 0}, + {"SYS_PSYNCH_MUTEXWAIT", Const, 0}, + {"SYS_PSYNCH_RW_DOWNGRADE", Const, 0}, + {"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_RDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK2", Const, 0}, + {"SYS_PSYNCH_RW_UPGRADE", Const, 0}, + {"SYS_PSYNCH_RW_WRLOCK", Const, 0}, + {"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0}, + {"SYS_PTRACE", Const, 0}, + {"SYS_PUTPMSG", Const, 0}, + {"SYS_PWRITE", Const, 0}, + {"SYS_PWRITE64", Const, 0}, + {"SYS_PWRITEV", Const, 0}, + {"SYS_PWRITE_NOCANCEL", Const, 0}, + {"SYS_QUERY_MODULE", Const, 0}, + {"SYS_QUOTACTL", Const, 0}, + {"SYS_RASCTL", Const, 1}, + {"SYS_RCTL_ADD_RULE", Const, 0}, + {"SYS_RCTL_GET_LIMITS", Const, 0}, + {"SYS_RCTL_GET_RACCT", Const, 0}, + 
{"SYS_RCTL_GET_RULES", Const, 0}, + {"SYS_RCTL_REMOVE_RULE", Const, 0}, + {"SYS_READ", Const, 0}, + {"SYS_READAHEAD", Const, 0}, + {"SYS_READDIR", Const, 0}, + {"SYS_READLINK", Const, 0}, + {"SYS_READLINKAT", Const, 0}, + {"SYS_READV", Const, 0}, + {"SYS_READV_NOCANCEL", Const, 0}, + {"SYS_READ_NOCANCEL", Const, 0}, + {"SYS_REBOOT", Const, 0}, + {"SYS_RECV", Const, 0}, + {"SYS_RECVFROM", Const, 0}, + {"SYS_RECVFROM_NOCANCEL", Const, 0}, + {"SYS_RECVMMSG", Const, 0}, + {"SYS_RECVMSG", Const, 0}, + {"SYS_RECVMSG_NOCANCEL", Const, 0}, + {"SYS_REMAP_FILE_PAGES", Const, 0}, + {"SYS_REMOVEXATTR", Const, 0}, + {"SYS_RENAME", Const, 0}, + {"SYS_RENAMEAT", Const, 0}, + {"SYS_REQUEST_KEY", Const, 0}, + {"SYS_RESTART_SYSCALL", Const, 0}, + {"SYS_REVOKE", Const, 0}, + {"SYS_RFORK", Const, 0}, + {"SYS_RMDIR", Const, 0}, + {"SYS_RTPRIO", Const, 0}, + {"SYS_RTPRIO_THREAD", Const, 0}, + {"SYS_RT_SIGACTION", Const, 0}, + {"SYS_RT_SIGPENDING", Const, 0}, + {"SYS_RT_SIGPROCMASK", Const, 0}, + {"SYS_RT_SIGQUEUEINFO", Const, 0}, + {"SYS_RT_SIGRETURN", Const, 0}, + {"SYS_RT_SIGSUSPEND", Const, 0}, + {"SYS_RT_SIGTIMEDWAIT", Const, 0}, + {"SYS_RT_TGSIGQUEUEINFO", Const, 0}, + {"SYS_SBRK", Const, 0}, + {"SYS_SCHED_GETAFFINITY", Const, 0}, + {"SYS_SCHED_GETPARAM", Const, 0}, + {"SYS_SCHED_GETSCHEDULER", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MAX", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MIN", Const, 0}, + {"SYS_SCHED_RR_GET_INTERVAL", Const, 0}, + {"SYS_SCHED_SETAFFINITY", Const, 0}, + {"SYS_SCHED_SETPARAM", Const, 0}, + {"SYS_SCHED_SETSCHEDULER", Const, 0}, + {"SYS_SCHED_YIELD", Const, 0}, + {"SYS_SCTP_GENERIC_RECVMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0}, + {"SYS_SCTP_PEELOFF", Const, 0}, + {"SYS_SEARCHFS", Const, 0}, + {"SYS_SECURITY", Const, 0}, + {"SYS_SELECT", Const, 0}, + {"SYS_SELECT_NOCANCEL", Const, 0}, + {"SYS_SEMCONFIG", Const, 1}, + {"SYS_SEMCTL", Const, 0}, + {"SYS_SEMGET", Const, 0}, + {"SYS_SEMOP", Const, 0}, + {"SYS_SEMSYS", Const, 0}, + {"SYS_SEMTIMEDOP", Const, 0}, + {"SYS_SEM_CLOSE", Const, 0}, + {"SYS_SEM_DESTROY", Const, 0}, + {"SYS_SEM_GETVALUE", Const, 0}, + {"SYS_SEM_INIT", Const, 0}, + {"SYS_SEM_OPEN", Const, 0}, + {"SYS_SEM_POST", Const, 0}, + {"SYS_SEM_TRYWAIT", Const, 0}, + {"SYS_SEM_UNLINK", Const, 0}, + {"SYS_SEM_WAIT", Const, 0}, + {"SYS_SEM_WAIT_NOCANCEL", Const, 0}, + {"SYS_SEND", Const, 0}, + {"SYS_SENDFILE", Const, 0}, + {"SYS_SENDFILE64", Const, 0}, + {"SYS_SENDMMSG", Const, 0}, + {"SYS_SENDMSG", Const, 0}, + {"SYS_SENDMSG_NOCANCEL", Const, 0}, + {"SYS_SENDTO", Const, 0}, + {"SYS_SENDTO_NOCANCEL", Const, 0}, + {"SYS_SETATTRLIST", Const, 0}, + {"SYS_SETAUDIT", Const, 0}, + {"SYS_SETAUDIT_ADDR", Const, 0}, + {"SYS_SETAUID", Const, 0}, + {"SYS_SETCONTEXT", Const, 0}, + {"SYS_SETDOMAINNAME", Const, 0}, + {"SYS_SETEGID", Const, 0}, + {"SYS_SETEUID", Const, 0}, + {"SYS_SETFIB", Const, 0}, + {"SYS_SETFSGID", Const, 0}, + {"SYS_SETFSGID32", Const, 0}, + {"SYS_SETFSUID", Const, 0}, + {"SYS_SETFSUID32", Const, 0}, + {"SYS_SETGID", Const, 0}, + {"SYS_SETGID32", Const, 0}, + {"SYS_SETGROUPS", Const, 0}, + {"SYS_SETGROUPS32", Const, 0}, + {"SYS_SETHOSTNAME", Const, 0}, + {"SYS_SETITIMER", Const, 0}, + {"SYS_SETLCID", Const, 0}, + {"SYS_SETLOGIN", Const, 0}, + {"SYS_SETLOGINCLASS", Const, 0}, + {"SYS_SETNS", Const, 0}, + {"SYS_SETPGID", Const, 0}, + {"SYS_SETPRIORITY", Const, 0}, + {"SYS_SETPRIVEXEC", Const, 0}, + {"SYS_SETREGID", Const, 0}, + {"SYS_SETREGID32", Const, 0}, + {"SYS_SETRESGID", Const, 0}, + {"SYS_SETRESGID32", Const, 
0}, + {"SYS_SETRESUID", Const, 0}, + {"SYS_SETRESUID32", Const, 0}, + {"SYS_SETREUID", Const, 0}, + {"SYS_SETREUID32", Const, 0}, + {"SYS_SETRLIMIT", Const, 0}, + {"SYS_SETRTABLE", Const, 1}, + {"SYS_SETSGROUPS", Const, 0}, + {"SYS_SETSID", Const, 0}, + {"SYS_SETSOCKOPT", Const, 0}, + {"SYS_SETTID", Const, 0}, + {"SYS_SETTID_WITH_PID", Const, 0}, + {"SYS_SETTIMEOFDAY", Const, 0}, + {"SYS_SETUID", Const, 0}, + {"SYS_SETUID32", Const, 0}, + {"SYS_SETWGROUPS", Const, 0}, + {"SYS_SETXATTR", Const, 0}, + {"SYS_SET_MEMPOLICY", Const, 0}, + {"SYS_SET_ROBUST_LIST", Const, 0}, + {"SYS_SET_THREAD_AREA", Const, 0}, + {"SYS_SET_TID_ADDRESS", Const, 0}, + {"SYS_SGETMASK", Const, 0}, + {"SYS_SHARED_REGION_CHECK_NP", Const, 0}, + {"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0}, + {"SYS_SHMAT", Const, 0}, + {"SYS_SHMCTL", Const, 0}, + {"SYS_SHMDT", Const, 0}, + {"SYS_SHMGET", Const, 0}, + {"SYS_SHMSYS", Const, 0}, + {"SYS_SHM_OPEN", Const, 0}, + {"SYS_SHM_UNLINK", Const, 0}, + {"SYS_SHUTDOWN", Const, 0}, + {"SYS_SIGACTION", Const, 0}, + {"SYS_SIGALTSTACK", Const, 0}, + {"SYS_SIGNAL", Const, 0}, + {"SYS_SIGNALFD", Const, 0}, + {"SYS_SIGNALFD4", Const, 0}, + {"SYS_SIGPENDING", Const, 0}, + {"SYS_SIGPROCMASK", Const, 0}, + {"SYS_SIGQUEUE", Const, 0}, + {"SYS_SIGQUEUEINFO", Const, 1}, + {"SYS_SIGRETURN", Const, 0}, + {"SYS_SIGSUSPEND", Const, 0}, + {"SYS_SIGSUSPEND_NOCANCEL", Const, 0}, + {"SYS_SIGTIMEDWAIT", Const, 0}, + {"SYS_SIGWAIT", Const, 0}, + {"SYS_SIGWAITINFO", Const, 0}, + {"SYS_SOCKET", Const, 0}, + {"SYS_SOCKETCALL", Const, 0}, + {"SYS_SOCKETPAIR", Const, 0}, + {"SYS_SPLICE", Const, 0}, + {"SYS_SSETMASK", Const, 0}, + {"SYS_SSTK", Const, 0}, + {"SYS_STACK_SNAPSHOT", Const, 0}, + {"SYS_STAT", Const, 0}, + {"SYS_STAT64", Const, 0}, + {"SYS_STAT64_EXTENDED", Const, 0}, + {"SYS_STATFS", Const, 0}, + {"SYS_STATFS64", Const, 0}, + {"SYS_STATV", Const, 0}, + {"SYS_STATVFS1", Const, 1}, + {"SYS_STAT_EXTENDED", Const, 0}, + {"SYS_STIME", Const, 0}, + {"SYS_STTY", Const, 0}, + {"SYS_SWAPCONTEXT", Const, 0}, + {"SYS_SWAPCTL", Const, 1}, + {"SYS_SWAPOFF", Const, 0}, + {"SYS_SWAPON", Const, 0}, + {"SYS_SYMLINK", Const, 0}, + {"SYS_SYMLINKAT", Const, 0}, + {"SYS_SYNC", Const, 0}, + {"SYS_SYNCFS", Const, 0}, + {"SYS_SYNC_FILE_RANGE", Const, 0}, + {"SYS_SYSARCH", Const, 0}, + {"SYS_SYSCALL", Const, 0}, + {"SYS_SYSCALL_BASE", Const, 0}, + {"SYS_SYSFS", Const, 0}, + {"SYS_SYSINFO", Const, 0}, + {"SYS_SYSLOG", Const, 0}, + {"SYS_TEE", Const, 0}, + {"SYS_TGKILL", Const, 0}, + {"SYS_THREAD_SELFID", Const, 0}, + {"SYS_THR_CREATE", Const, 0}, + {"SYS_THR_EXIT", Const, 0}, + {"SYS_THR_KILL", Const, 0}, + {"SYS_THR_KILL2", Const, 0}, + {"SYS_THR_NEW", Const, 0}, + {"SYS_THR_SELF", Const, 0}, + {"SYS_THR_SET_NAME", Const, 0}, + {"SYS_THR_SUSPEND", Const, 0}, + {"SYS_THR_WAKE", Const, 0}, + {"SYS_TIME", Const, 0}, + {"SYS_TIMERFD_CREATE", Const, 0}, + {"SYS_TIMERFD_GETTIME", Const, 0}, + {"SYS_TIMERFD_SETTIME", Const, 0}, + {"SYS_TIMER_CREATE", Const, 0}, + {"SYS_TIMER_DELETE", Const, 0}, + {"SYS_TIMER_GETOVERRUN", Const, 0}, + {"SYS_TIMER_GETTIME", Const, 0}, + {"SYS_TIMER_SETTIME", Const, 0}, + {"SYS_TIMES", Const, 0}, + {"SYS_TKILL", Const, 0}, + {"SYS_TRUNCATE", Const, 0}, + {"SYS_TRUNCATE64", Const, 0}, + {"SYS_TUXCALL", Const, 0}, + {"SYS_UGETRLIMIT", Const, 0}, + {"SYS_ULIMIT", Const, 0}, + {"SYS_UMASK", Const, 0}, + {"SYS_UMASK_EXTENDED", Const, 0}, + {"SYS_UMOUNT", Const, 0}, + {"SYS_UMOUNT2", Const, 0}, + {"SYS_UNAME", Const, 0}, + {"SYS_UNDELETE", Const, 0}, + {"SYS_UNLINK", Const, 0}, + {"SYS_UNLINKAT", Const, 
0}, + {"SYS_UNMOUNT", Const, 0}, + {"SYS_UNSHARE", Const, 0}, + {"SYS_USELIB", Const, 0}, + {"SYS_USTAT", Const, 0}, + {"SYS_UTIME", Const, 0}, + {"SYS_UTIMENSAT", Const, 0}, + {"SYS_UTIMES", Const, 0}, + {"SYS_UTRACE", Const, 0}, + {"SYS_UUIDGEN", Const, 0}, + {"SYS_VADVISE", Const, 1}, + {"SYS_VFORK", Const, 0}, + {"SYS_VHANGUP", Const, 0}, + {"SYS_VM86", Const, 0}, + {"SYS_VM86OLD", Const, 0}, + {"SYS_VMSPLICE", Const, 0}, + {"SYS_VM_PRESSURE_MONITOR", Const, 0}, + {"SYS_VSERVER", Const, 0}, + {"SYS_WAIT4", Const, 0}, + {"SYS_WAIT4_NOCANCEL", Const, 0}, + {"SYS_WAIT6", Const, 1}, + {"SYS_WAITEVENT", Const, 0}, + {"SYS_WAITID", Const, 0}, + {"SYS_WAITID_NOCANCEL", Const, 0}, + {"SYS_WAITPID", Const, 0}, + {"SYS_WATCHEVENT", Const, 0}, + {"SYS_WORKQ_KERNRETURN", Const, 0}, + {"SYS_WORKQ_OPEN", Const, 0}, + {"SYS_WRITE", Const, 0}, + {"SYS_WRITEV", Const, 0}, + {"SYS_WRITEV_NOCANCEL", Const, 0}, + {"SYS_WRITE_NOCANCEL", Const, 0}, + {"SYS_YIELD", Const, 0}, + {"SYS__LLSEEK", Const, 0}, + {"SYS__LWP_CONTINUE", Const, 1}, + {"SYS__LWP_CREATE", Const, 1}, + {"SYS__LWP_CTL", Const, 1}, + {"SYS__LWP_DETACH", Const, 1}, + {"SYS__LWP_EXIT", Const, 1}, + {"SYS__LWP_GETNAME", Const, 1}, + {"SYS__LWP_GETPRIVATE", Const, 1}, + {"SYS__LWP_KILL", Const, 1}, + {"SYS__LWP_PARK", Const, 1}, + {"SYS__LWP_SELF", Const, 1}, + {"SYS__LWP_SETNAME", Const, 1}, + {"SYS__LWP_SETPRIVATE", Const, 1}, + {"SYS__LWP_SUSPEND", Const, 1}, + {"SYS__LWP_UNPARK", Const, 1}, + {"SYS__LWP_UNPARK_ALL", Const, 1}, + {"SYS__LWP_WAIT", Const, 1}, + {"SYS__LWP_WAKEUP", Const, 1}, + {"SYS__NEWSELECT", Const, 0}, + {"SYS__PSET_BIND", Const, 1}, + {"SYS__SCHED_GETAFFINITY", Const, 1}, + {"SYS__SCHED_GETPARAM", Const, 1}, + {"SYS__SCHED_SETAFFINITY", Const, 1}, + {"SYS__SCHED_SETPARAM", Const, 1}, + {"SYS__SYSCTL", Const, 0}, + {"SYS__UMTX_LOCK", Const, 0}, + {"SYS__UMTX_OP", Const, 0}, + {"SYS__UMTX_UNLOCK", Const, 0}, + {"SYS___ACL_ACLCHECK_FD", Const, 0}, + {"SYS___ACL_ACLCHECK_FILE", Const, 0}, + {"SYS___ACL_ACLCHECK_LINK", Const, 0}, + {"SYS___ACL_DELETE_FD", Const, 0}, + {"SYS___ACL_DELETE_FILE", Const, 0}, + {"SYS___ACL_DELETE_LINK", Const, 0}, + {"SYS___ACL_GET_FD", Const, 0}, + {"SYS___ACL_GET_FILE", Const, 0}, + {"SYS___ACL_GET_LINK", Const, 0}, + {"SYS___ACL_SET_FD", Const, 0}, + {"SYS___ACL_SET_FILE", Const, 0}, + {"SYS___ACL_SET_LINK", Const, 0}, + {"SYS___CAP_RIGHTS_GET", Const, 14}, + {"SYS___CLONE", Const, 1}, + {"SYS___DISABLE_THREADSIGNAL", Const, 0}, + {"SYS___GETCWD", Const, 0}, + {"SYS___GETLOGIN", Const, 1}, + {"SYS___GET_TCB", Const, 1}, + {"SYS___MAC_EXECVE", Const, 0}, + {"SYS___MAC_GETFSSTAT", Const, 0}, + {"SYS___MAC_GET_FD", Const, 0}, + {"SYS___MAC_GET_FILE", Const, 0}, + {"SYS___MAC_GET_LCID", Const, 0}, + {"SYS___MAC_GET_LCTX", Const, 0}, + {"SYS___MAC_GET_LINK", Const, 0}, + {"SYS___MAC_GET_MOUNT", Const, 0}, + {"SYS___MAC_GET_PID", Const, 0}, + {"SYS___MAC_GET_PROC", Const, 0}, + {"SYS___MAC_MOUNT", Const, 0}, + {"SYS___MAC_SET_FD", Const, 0}, + {"SYS___MAC_SET_FILE", Const, 0}, + {"SYS___MAC_SET_LCTX", Const, 0}, + {"SYS___MAC_SET_LINK", Const, 0}, + {"SYS___MAC_SET_PROC", Const, 0}, + {"SYS___MAC_SYSCALL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___POSIX_CHOWN", Const, 1}, + {"SYS___POSIX_FCHOWN", Const, 1}, + {"SYS___POSIX_LCHOWN", Const, 1}, + {"SYS___POSIX_RENAME", Const, 1}, + {"SYS___PTHREAD_CANCELED", Const, 0}, + {"SYS___PTHREAD_CHDIR", Const, 0}, + {"SYS___PTHREAD_FCHDIR", Const, 0}, + {"SYS___PTHREAD_KILL", Const, 0}, + 
{"SYS___PTHREAD_MARKCANCEL", Const, 0}, + {"SYS___PTHREAD_SIGMASK", Const, 0}, + {"SYS___QUOTACTL", Const, 1}, + {"SYS___SEMCTL", Const, 1}, + {"SYS___SEMWAIT_SIGNAL", Const, 0}, + {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___SETLOGIN", Const, 1}, + {"SYS___SETUGID", Const, 0}, + {"SYS___SET_TCB", Const, 1}, + {"SYS___SIGACTION_SIGTRAMP", Const, 1}, + {"SYS___SIGTIMEDWAIT", Const, 1}, + {"SYS___SIGWAIT", Const, 0}, + {"SYS___SIGWAIT_NOCANCEL", Const, 0}, + {"SYS___SYSCTL", Const, 0}, + {"SYS___TFORK", Const, 1}, + {"SYS___THREXIT", Const, 1}, + {"SYS___THRSIGDIVERT", Const, 1}, + {"SYS___THRSLEEP", Const, 1}, + {"SYS___THRWAKEUP", Const, 1}, + {"S_ARCH1", Const, 1}, + {"S_ARCH2", Const, 1}, + {"S_BLKSIZE", Const, 0}, + {"S_IEXEC", Const, 0}, + {"S_IFBLK", Const, 0}, + {"S_IFCHR", Const, 0}, + {"S_IFDIR", Const, 0}, + {"S_IFIFO", Const, 0}, + {"S_IFLNK", Const, 0}, + {"S_IFMT", Const, 0}, + {"S_IFREG", Const, 0}, + {"S_IFSOCK", Const, 0}, + {"S_IFWHT", Const, 0}, + {"S_IREAD", Const, 0}, + {"S_IRGRP", Const, 0}, + {"S_IROTH", Const, 0}, + {"S_IRUSR", Const, 0}, + {"S_IRWXG", Const, 0}, + {"S_IRWXO", Const, 0}, + {"S_IRWXU", Const, 0}, + {"S_ISGID", Const, 0}, + {"S_ISTXT", Const, 0}, + {"S_ISUID", Const, 0}, + {"S_ISVTX", Const, 0}, + {"S_IWGRP", Const, 0}, + {"S_IWOTH", Const, 0}, + {"S_IWRITE", Const, 0}, + {"S_IWUSR", Const, 0}, + {"S_IXGRP", Const, 0}, + {"S_IXOTH", Const, 0}, + {"S_IXUSR", Const, 0}, + {"S_LOGIN_SET", Const, 1}, + {"SecurityAttributes", Type, 0}, + {"SecurityAttributes.InheritHandle", Field, 0}, + {"SecurityAttributes.Length", Field, 0}, + {"SecurityAttributes.SecurityDescriptor", Field, 0}, + {"Seek", Func, 0}, + {"Select", Func, 0}, + {"Sendfile", Func, 0}, + {"Sendmsg", Func, 0}, + {"SendmsgN", Func, 3}, + {"Sendto", Func, 0}, + {"Servent", Type, 0}, + {"Servent.Aliases", Field, 0}, + {"Servent.Name", Field, 0}, + {"Servent.Port", Field, 0}, + {"Servent.Proto", Field, 0}, + {"SetBpf", Func, 0}, + {"SetBpfBuflen", Func, 0}, + {"SetBpfDatalink", Func, 0}, + {"SetBpfHeadercmpl", Func, 0}, + {"SetBpfImmediate", Func, 0}, + {"SetBpfInterface", Func, 0}, + {"SetBpfPromisc", Func, 0}, + {"SetBpfTimeout", Func, 0}, + {"SetCurrentDirectory", Func, 0}, + {"SetEndOfFile", Func, 0}, + {"SetEnvironmentVariable", Func, 0}, + {"SetFileAttributes", Func, 0}, + {"SetFileCompletionNotificationModes", Func, 2}, + {"SetFilePointer", Func, 0}, + {"SetFileTime", Func, 0}, + {"SetHandleInformation", Func, 0}, + {"SetKevent", Func, 0}, + {"SetLsfPromisc", Func, 0}, + {"SetNonblock", Func, 0}, + {"Setdomainname", Func, 0}, + {"Setegid", Func, 0}, + {"Setenv", Func, 0}, + {"Seteuid", Func, 0}, + {"Setfsgid", Func, 0}, + {"Setfsuid", Func, 0}, + {"Setgid", Func, 0}, + {"Setgroups", Func, 0}, + {"Sethostname", Func, 0}, + {"Setlogin", Func, 0}, + {"Setpgid", Func, 0}, + {"Setpriority", Func, 0}, + {"Setprivexec", Func, 0}, + {"Setregid", Func, 0}, + {"Setresgid", Func, 0}, + {"Setresuid", Func, 0}, + {"Setreuid", Func, 0}, + {"Setrlimit", Func, 0}, + {"Setsid", Func, 0}, + {"Setsockopt", Func, 0}, + {"SetsockoptByte", Func, 0}, + {"SetsockoptICMPv6Filter", Func, 2}, + {"SetsockoptIPMreq", Func, 0}, + {"SetsockoptIPMreqn", Func, 0}, + {"SetsockoptIPv6Mreq", Func, 0}, + {"SetsockoptInet4Addr", Func, 0}, + {"SetsockoptInt", Func, 0}, + {"SetsockoptLinger", Func, 0}, + {"SetsockoptString", Func, 0}, + {"SetsockoptTimeval", Func, 0}, + {"Settimeofday", Func, 0}, + {"Setuid", Func, 0}, + {"Setxattr", Func, 1}, + {"Shutdown", Func, 0}, + {"SidTypeAlias", Const, 0}, + {"SidTypeComputer", 
Const, 0}, + {"SidTypeDeletedAccount", Const, 0}, + {"SidTypeDomain", Const, 0}, + {"SidTypeGroup", Const, 0}, + {"SidTypeInvalid", Const, 0}, + {"SidTypeLabel", Const, 0}, + {"SidTypeUnknown", Const, 0}, + {"SidTypeUser", Const, 0}, + {"SidTypeWellKnownGroup", Const, 0}, + {"Signal", Type, 0}, + {"SizeofBpfHdr", Const, 0}, + {"SizeofBpfInsn", Const, 0}, + {"SizeofBpfProgram", Const, 0}, + {"SizeofBpfStat", Const, 0}, + {"SizeofBpfVersion", Const, 0}, + {"SizeofBpfZbuf", Const, 0}, + {"SizeofBpfZbufHeader", Const, 0}, + {"SizeofCmsghdr", Const, 0}, + {"SizeofICMPv6Filter", Const, 2}, + {"SizeofIPMreq", Const, 0}, + {"SizeofIPMreqn", Const, 0}, + {"SizeofIPv6MTUInfo", Const, 2}, + {"SizeofIPv6Mreq", Const, 0}, + {"SizeofIfAddrmsg", Const, 0}, + {"SizeofIfAnnounceMsghdr", Const, 1}, + {"SizeofIfData", Const, 0}, + {"SizeofIfInfomsg", Const, 0}, + {"SizeofIfMsghdr", Const, 0}, + {"SizeofIfaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr2", Const, 0}, + {"SizeofInet4Pktinfo", Const, 0}, + {"SizeofInet6Pktinfo", Const, 0}, + {"SizeofInotifyEvent", Const, 0}, + {"SizeofLinger", Const, 0}, + {"SizeofMsghdr", Const, 0}, + {"SizeofNlAttr", Const, 0}, + {"SizeofNlMsgerr", Const, 0}, + {"SizeofNlMsghdr", Const, 0}, + {"SizeofRtAttr", Const, 0}, + {"SizeofRtGenmsg", Const, 0}, + {"SizeofRtMetrics", Const, 0}, + {"SizeofRtMsg", Const, 0}, + {"SizeofRtMsghdr", Const, 0}, + {"SizeofRtNexthop", Const, 0}, + {"SizeofSockFilter", Const, 0}, + {"SizeofSockFprog", Const, 0}, + {"SizeofSockaddrAny", Const, 0}, + {"SizeofSockaddrDatalink", Const, 0}, + {"SizeofSockaddrInet4", Const, 0}, + {"SizeofSockaddrInet6", Const, 0}, + {"SizeofSockaddrLinklayer", Const, 0}, + {"SizeofSockaddrNetlink", Const, 0}, + {"SizeofSockaddrUnix", Const, 0}, + {"SizeofTCPInfo", Const, 1}, + {"SizeofUcred", Const, 0}, + {"SlicePtrFromStrings", Func, 1}, + {"SockFilter", Type, 0}, + {"SockFilter.Code", Field, 0}, + {"SockFilter.Jf", Field, 0}, + {"SockFilter.Jt", Field, 0}, + {"SockFilter.K", Field, 0}, + {"SockFprog", Type, 0}, + {"SockFprog.Filter", Field, 0}, + {"SockFprog.Len", Field, 0}, + {"SockFprog.Pad_cgo_0", Field, 0}, + {"Sockaddr", Type, 0}, + {"SockaddrDatalink", Type, 0}, + {"SockaddrDatalink.Alen", Field, 0}, + {"SockaddrDatalink.Data", Field, 0}, + {"SockaddrDatalink.Family", Field, 0}, + {"SockaddrDatalink.Index", Field, 0}, + {"SockaddrDatalink.Len", Field, 0}, + {"SockaddrDatalink.Nlen", Field, 0}, + {"SockaddrDatalink.Slen", Field, 0}, + {"SockaddrDatalink.Type", Field, 0}, + {"SockaddrGen", Type, 0}, + {"SockaddrInet4", Type, 0}, + {"SockaddrInet4.Addr", Field, 0}, + {"SockaddrInet4.Port", Field, 0}, + {"SockaddrInet6", Type, 0}, + {"SockaddrInet6.Addr", Field, 0}, + {"SockaddrInet6.Port", Field, 0}, + {"SockaddrInet6.ZoneId", Field, 0}, + {"SockaddrLinklayer", Type, 0}, + {"SockaddrLinklayer.Addr", Field, 0}, + {"SockaddrLinklayer.Halen", Field, 0}, + {"SockaddrLinklayer.Hatype", Field, 0}, + {"SockaddrLinklayer.Ifindex", Field, 0}, + {"SockaddrLinklayer.Pkttype", Field, 0}, + {"SockaddrLinklayer.Protocol", Field, 0}, + {"SockaddrNetlink", Type, 0}, + {"SockaddrNetlink.Family", Field, 0}, + {"SockaddrNetlink.Groups", Field, 0}, + {"SockaddrNetlink.Pad", Field, 0}, + {"SockaddrNetlink.Pid", Field, 0}, + {"SockaddrUnix", Type, 0}, + {"SockaddrUnix.Name", Field, 0}, + {"Socket", Func, 0}, + {"SocketControlMessage", Type, 0}, + {"SocketControlMessage.Data", Field, 0}, + {"SocketControlMessage.Header", Field, 0}, + {"SocketDisableIPv6", Var, 0}, + {"Socketpair", Func, 0}, + {"Splice", Func, 
0}, + {"StartProcess", Func, 0}, + {"StartupInfo", Type, 0}, + {"StartupInfo.Cb", Field, 0}, + {"StartupInfo.Desktop", Field, 0}, + {"StartupInfo.FillAttribute", Field, 0}, + {"StartupInfo.Flags", Field, 0}, + {"StartupInfo.ShowWindow", Field, 0}, + {"StartupInfo.StdErr", Field, 0}, + {"StartupInfo.StdInput", Field, 0}, + {"StartupInfo.StdOutput", Field, 0}, + {"StartupInfo.Title", Field, 0}, + {"StartupInfo.X", Field, 0}, + {"StartupInfo.XCountChars", Field, 0}, + {"StartupInfo.XSize", Field, 0}, + {"StartupInfo.Y", Field, 0}, + {"StartupInfo.YCountChars", Field, 0}, + {"StartupInfo.YSize", Field, 0}, + {"Stat", Func, 0}, + {"Stat_t", Type, 0}, + {"Stat_t.Atim", Field, 0}, + {"Stat_t.Atim_ext", Field, 12}, + {"Stat_t.Atimespec", Field, 0}, + {"Stat_t.Birthtimespec", Field, 0}, + {"Stat_t.Blksize", Field, 0}, + {"Stat_t.Blocks", Field, 0}, + {"Stat_t.Btim_ext", Field, 12}, + {"Stat_t.Ctim", Field, 0}, + {"Stat_t.Ctim_ext", Field, 12}, + {"Stat_t.Ctimespec", Field, 0}, + {"Stat_t.Dev", Field, 0}, + {"Stat_t.Flags", Field, 0}, + {"Stat_t.Gen", Field, 0}, + {"Stat_t.Gid", Field, 0}, + {"Stat_t.Ino", Field, 0}, + {"Stat_t.Lspare", Field, 0}, + {"Stat_t.Lspare0", Field, 2}, + {"Stat_t.Lspare1", Field, 2}, + {"Stat_t.Mode", Field, 0}, + {"Stat_t.Mtim", Field, 0}, + {"Stat_t.Mtim_ext", Field, 12}, + {"Stat_t.Mtimespec", Field, 0}, + {"Stat_t.Nlink", Field, 0}, + {"Stat_t.Pad_cgo_0", Field, 0}, + {"Stat_t.Pad_cgo_1", Field, 0}, + {"Stat_t.Pad_cgo_2", Field, 0}, + {"Stat_t.Padding0", Field, 12}, + {"Stat_t.Padding1", Field, 12}, + {"Stat_t.Qspare", Field, 0}, + {"Stat_t.Rdev", Field, 0}, + {"Stat_t.Size", Field, 0}, + {"Stat_t.Spare", Field, 2}, + {"Stat_t.Uid", Field, 0}, + {"Stat_t.X__pad0", Field, 0}, + {"Stat_t.X__pad1", Field, 0}, + {"Stat_t.X__pad2", Field, 0}, + {"Stat_t.X__st_birthtim", Field, 2}, + {"Stat_t.X__st_ino", Field, 0}, + {"Stat_t.X__unused", Field, 0}, + {"Statfs", Func, 0}, + {"Statfs_t", Type, 0}, + {"Statfs_t.Asyncreads", Field, 0}, + {"Statfs_t.Asyncwrites", Field, 0}, + {"Statfs_t.Bavail", Field, 0}, + {"Statfs_t.Bfree", Field, 0}, + {"Statfs_t.Blocks", Field, 0}, + {"Statfs_t.Bsize", Field, 0}, + {"Statfs_t.Charspare", Field, 0}, + {"Statfs_t.F_asyncreads", Field, 2}, + {"Statfs_t.F_asyncwrites", Field, 2}, + {"Statfs_t.F_bavail", Field, 2}, + {"Statfs_t.F_bfree", Field, 2}, + {"Statfs_t.F_blocks", Field, 2}, + {"Statfs_t.F_bsize", Field, 2}, + {"Statfs_t.F_ctime", Field, 2}, + {"Statfs_t.F_favail", Field, 2}, + {"Statfs_t.F_ffree", Field, 2}, + {"Statfs_t.F_files", Field, 2}, + {"Statfs_t.F_flags", Field, 2}, + {"Statfs_t.F_fsid", Field, 2}, + {"Statfs_t.F_fstypename", Field, 2}, + {"Statfs_t.F_iosize", Field, 2}, + {"Statfs_t.F_mntfromname", Field, 2}, + {"Statfs_t.F_mntfromspec", Field, 3}, + {"Statfs_t.F_mntonname", Field, 2}, + {"Statfs_t.F_namemax", Field, 2}, + {"Statfs_t.F_owner", Field, 2}, + {"Statfs_t.F_spare", Field, 2}, + {"Statfs_t.F_syncreads", Field, 2}, + {"Statfs_t.F_syncwrites", Field, 2}, + {"Statfs_t.Ffree", Field, 0}, + {"Statfs_t.Files", Field, 0}, + {"Statfs_t.Flags", Field, 0}, + {"Statfs_t.Frsize", Field, 0}, + {"Statfs_t.Fsid", Field, 0}, + {"Statfs_t.Fssubtype", Field, 0}, + {"Statfs_t.Fstypename", Field, 0}, + {"Statfs_t.Iosize", Field, 0}, + {"Statfs_t.Mntfromname", Field, 0}, + {"Statfs_t.Mntonname", Field, 0}, + {"Statfs_t.Mount_info", Field, 2}, + {"Statfs_t.Namelen", Field, 0}, + {"Statfs_t.Namemax", Field, 0}, + {"Statfs_t.Owner", Field, 0}, + {"Statfs_t.Pad_cgo_0", Field, 0}, + {"Statfs_t.Pad_cgo_1", Field, 2}, + {"Statfs_t.Reserved", 
Field, 0}, + {"Statfs_t.Spare", Field, 0}, + {"Statfs_t.Syncreads", Field, 0}, + {"Statfs_t.Syncwrites", Field, 0}, + {"Statfs_t.Type", Field, 0}, + {"Statfs_t.Version", Field, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"StringBytePtr", Func, 0}, + {"StringByteSlice", Func, 0}, + {"StringSlicePtr", Func, 0}, + {"StringToSid", Func, 0}, + {"StringToUTF16", Func, 0}, + {"StringToUTF16Ptr", Func, 0}, + {"Symlink", Func, 0}, + {"Sync", Func, 0}, + {"SyncFileRange", Func, 0}, + {"SysProcAttr", Type, 0}, + {"SysProcAttr.AdditionalInheritedHandles", Field, 17}, + {"SysProcAttr.AmbientCaps", Field, 9}, + {"SysProcAttr.CgroupFD", Field, 20}, + {"SysProcAttr.Chroot", Field, 0}, + {"SysProcAttr.Cloneflags", Field, 2}, + {"SysProcAttr.CmdLine", Field, 0}, + {"SysProcAttr.CreationFlags", Field, 1}, + {"SysProcAttr.Credential", Field, 0}, + {"SysProcAttr.Ctty", Field, 1}, + {"SysProcAttr.Foreground", Field, 5}, + {"SysProcAttr.GidMappings", Field, 4}, + {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5}, + {"SysProcAttr.HideWindow", Field, 0}, + {"SysProcAttr.Jail", Field, 21}, + {"SysProcAttr.NoInheritHandles", Field, 16}, + {"SysProcAttr.Noctty", Field, 0}, + {"SysProcAttr.ParentProcess", Field, 17}, + {"SysProcAttr.Pdeathsig", Field, 0}, + {"SysProcAttr.Pgid", Field, 5}, + {"SysProcAttr.PidFD", Field, 22}, + {"SysProcAttr.ProcessAttributes", Field, 13}, + {"SysProcAttr.Ptrace", Field, 0}, + {"SysProcAttr.Setctty", Field, 0}, + {"SysProcAttr.Setpgid", Field, 0}, + {"SysProcAttr.Setsid", Field, 0}, + {"SysProcAttr.ThreadAttributes", Field, 13}, + {"SysProcAttr.Token", Field, 10}, + {"SysProcAttr.UidMappings", Field, 4}, + {"SysProcAttr.Unshareflags", Field, 7}, + {"SysProcAttr.UseCgroupFD", Field, 20}, + {"SysProcIDMap", Type, 4}, + {"SysProcIDMap.ContainerID", Field, 4}, + {"SysProcIDMap.HostID", Field, 4}, + {"SysProcIDMap.Size", Field, 4}, + {"Syscall", Func, 0}, + {"Syscall12", Func, 0}, + {"Syscall15", Func, 0}, + {"Syscall18", Func, 12}, + {"Syscall6", Func, 0}, + {"Syscall9", Func, 0}, + {"SyscallN", Func, 18}, + {"Sysctl", Func, 0}, + {"SysctlUint32", Func, 0}, + {"Sysctlnode", Type, 2}, + {"Sysctlnode.Flags", Field, 2}, + {"Sysctlnode.Name", Field, 2}, + {"Sysctlnode.Num", Field, 2}, + {"Sysctlnode.Un", Field, 2}, + {"Sysctlnode.Ver", Field, 2}, + {"Sysctlnode.X__rsvd", Field, 2}, + {"Sysctlnode.X_sysctl_desc", Field, 2}, + {"Sysctlnode.X_sysctl_func", Field, 2}, + {"Sysctlnode.X_sysctl_parent", Field, 2}, + {"Sysctlnode.X_sysctl_size", Field, 2}, + {"Sysinfo", Func, 0}, + {"Sysinfo_t", Type, 0}, + {"Sysinfo_t.Bufferram", Field, 0}, + {"Sysinfo_t.Freehigh", Field, 0}, + {"Sysinfo_t.Freeram", Field, 0}, + {"Sysinfo_t.Freeswap", Field, 0}, + {"Sysinfo_t.Loads", Field, 0}, + {"Sysinfo_t.Pad", Field, 0}, + {"Sysinfo_t.Pad_cgo_0", Field, 0}, + {"Sysinfo_t.Pad_cgo_1", Field, 0}, + {"Sysinfo_t.Procs", Field, 0}, + {"Sysinfo_t.Sharedram", Field, 0}, + {"Sysinfo_t.Totalhigh", Field, 0}, + {"Sysinfo_t.Totalram", Field, 0}, + {"Sysinfo_t.Totalswap", Field, 0}, + {"Sysinfo_t.Unit", Field, 0}, + {"Sysinfo_t.Uptime", Field, 0}, + {"Sysinfo_t.X_f", Field, 0}, + {"Systemtime", Type, 0}, + {"Systemtime.Day", Field, 0}, + {"Systemtime.DayOfWeek", Field, 0}, + {"Systemtime.Hour", Field, 0}, + {"Systemtime.Milliseconds", Field, 0}, + {"Systemtime.Minute", Field, 0}, + {"Systemtime.Month", Field, 0}, + {"Systemtime.Second", Field, 0}, + {"Systemtime.Year", Field, 0}, + {"TCGETS", Const, 0}, + {"TCIFLUSH", Const, 1}, + {"TCIOFLUSH", Const, 1}, + {"TCOFLUSH", Const, 1}, + {"TCPInfo", 
Type, 1}, + {"TCPInfo.Advmss", Field, 1}, + {"TCPInfo.Ato", Field, 1}, + {"TCPInfo.Backoff", Field, 1}, + {"TCPInfo.Ca_state", Field, 1}, + {"TCPInfo.Fackets", Field, 1}, + {"TCPInfo.Last_ack_recv", Field, 1}, + {"TCPInfo.Last_ack_sent", Field, 1}, + {"TCPInfo.Last_data_recv", Field, 1}, + {"TCPInfo.Last_data_sent", Field, 1}, + {"TCPInfo.Lost", Field, 1}, + {"TCPInfo.Options", Field, 1}, + {"TCPInfo.Pad_cgo_0", Field, 1}, + {"TCPInfo.Pmtu", Field, 1}, + {"TCPInfo.Probes", Field, 1}, + {"TCPInfo.Rcv_mss", Field, 1}, + {"TCPInfo.Rcv_rtt", Field, 1}, + {"TCPInfo.Rcv_space", Field, 1}, + {"TCPInfo.Rcv_ssthresh", Field, 1}, + {"TCPInfo.Reordering", Field, 1}, + {"TCPInfo.Retrans", Field, 1}, + {"TCPInfo.Retransmits", Field, 1}, + {"TCPInfo.Rto", Field, 1}, + {"TCPInfo.Rtt", Field, 1}, + {"TCPInfo.Rttvar", Field, 1}, + {"TCPInfo.Sacked", Field, 1}, + {"TCPInfo.Snd_cwnd", Field, 1}, + {"TCPInfo.Snd_mss", Field, 1}, + {"TCPInfo.Snd_ssthresh", Field, 1}, + {"TCPInfo.State", Field, 1}, + {"TCPInfo.Total_retrans", Field, 1}, + {"TCPInfo.Unacked", Field, 1}, + {"TCPKeepalive", Type, 3}, + {"TCPKeepalive.Interval", Field, 3}, + {"TCPKeepalive.OnOff", Field, 3}, + {"TCPKeepalive.Time", Field, 3}, + {"TCP_CA_NAME_MAX", Const, 0}, + {"TCP_CONGCTL", Const, 1}, + {"TCP_CONGESTION", Const, 0}, + {"TCP_CONNECTIONTIMEOUT", Const, 0}, + {"TCP_CORK", Const, 0}, + {"TCP_DEFER_ACCEPT", Const, 0}, + {"TCP_ENABLE_ECN", Const, 16}, + {"TCP_INFO", Const, 0}, + {"TCP_KEEPALIVE", Const, 0}, + {"TCP_KEEPCNT", Const, 0}, + {"TCP_KEEPIDLE", Const, 0}, + {"TCP_KEEPINIT", Const, 1}, + {"TCP_KEEPINTVL", Const, 0}, + {"TCP_LINGER2", Const, 0}, + {"TCP_MAXBURST", Const, 0}, + {"TCP_MAXHLEN", Const, 0}, + {"TCP_MAXOLEN", Const, 0}, + {"TCP_MAXSEG", Const, 0}, + {"TCP_MAXWIN", Const, 0}, + {"TCP_MAX_SACK", Const, 0}, + {"TCP_MAX_WINSHIFT", Const, 0}, + {"TCP_MD5SIG", Const, 0}, + {"TCP_MD5SIG_MAXKEYLEN", Const, 0}, + {"TCP_MINMSS", Const, 0}, + {"TCP_MINMSSOVERLOAD", Const, 0}, + {"TCP_MSS", Const, 0}, + {"TCP_NODELAY", Const, 0}, + {"TCP_NOOPT", Const, 0}, + {"TCP_NOPUSH", Const, 0}, + {"TCP_NOTSENT_LOWAT", Const, 16}, + {"TCP_NSTATES", Const, 1}, + {"TCP_QUICKACK", Const, 0}, + {"TCP_RXT_CONNDROPTIME", Const, 0}, + {"TCP_RXT_FINDROP", Const, 0}, + {"TCP_SACK_ENABLE", Const, 1}, + {"TCP_SENDMOREACKS", Const, 16}, + {"TCP_SYNCNT", Const, 0}, + {"TCP_VENDOR", Const, 3}, + {"TCP_WINDOW_CLAMP", Const, 0}, + {"TCSAFLUSH", Const, 1}, + {"TCSETS", Const, 0}, + {"TF_DISCONNECT", Const, 0}, + {"TF_REUSE_SOCKET", Const, 0}, + {"TF_USE_DEFAULT_WORKER", Const, 0}, + {"TF_USE_KERNEL_APC", Const, 0}, + {"TF_USE_SYSTEM_THREAD", Const, 0}, + {"TF_WRITE_BEHIND", Const, 0}, + {"TH32CS_INHERIT", Const, 4}, + {"TH32CS_SNAPALL", Const, 4}, + {"TH32CS_SNAPHEAPLIST", Const, 4}, + {"TH32CS_SNAPMODULE", Const, 4}, + {"TH32CS_SNAPMODULE32", Const, 4}, + {"TH32CS_SNAPPROCESS", Const, 4}, + {"TH32CS_SNAPTHREAD", Const, 4}, + {"TIME_ZONE_ID_DAYLIGHT", Const, 0}, + {"TIME_ZONE_ID_STANDARD", Const, 0}, + {"TIME_ZONE_ID_UNKNOWN", Const, 0}, + {"TIOCCBRK", Const, 0}, + {"TIOCCDTR", Const, 0}, + {"TIOCCONS", Const, 0}, + {"TIOCDCDTIMESTAMP", Const, 0}, + {"TIOCDRAIN", Const, 0}, + {"TIOCDSIMICROCODE", Const, 0}, + {"TIOCEXCL", Const, 0}, + {"TIOCEXT", Const, 0}, + {"TIOCFLAG_CDTRCTS", Const, 1}, + {"TIOCFLAG_CLOCAL", Const, 1}, + {"TIOCFLAG_CRTSCTS", Const, 1}, + {"TIOCFLAG_MDMBUF", Const, 1}, + {"TIOCFLAG_PPS", Const, 1}, + {"TIOCFLAG_SOFTCAR", Const, 1}, + {"TIOCFLUSH", Const, 0}, + {"TIOCGDEV", Const, 0}, + {"TIOCGDRAINWAIT", Const, 0}, + {"TIOCGETA", Const, 
0}, + {"TIOCGETD", Const, 0}, + {"TIOCGFLAGS", Const, 1}, + {"TIOCGICOUNT", Const, 0}, + {"TIOCGLCKTRMIOS", Const, 0}, + {"TIOCGLINED", Const, 1}, + {"TIOCGPGRP", Const, 0}, + {"TIOCGPTN", Const, 0}, + {"TIOCGQSIZE", Const, 1}, + {"TIOCGRANTPT", Const, 1}, + {"TIOCGRS485", Const, 0}, + {"TIOCGSERIAL", Const, 0}, + {"TIOCGSID", Const, 0}, + {"TIOCGSIZE", Const, 1}, + {"TIOCGSOFTCAR", Const, 0}, + {"TIOCGTSTAMP", Const, 1}, + {"TIOCGWINSZ", Const, 0}, + {"TIOCINQ", Const, 0}, + {"TIOCIXOFF", Const, 0}, + {"TIOCIXON", Const, 0}, + {"TIOCLINUX", Const, 0}, + {"TIOCMBIC", Const, 0}, + {"TIOCMBIS", Const, 0}, + {"TIOCMGDTRWAIT", Const, 0}, + {"TIOCMGET", Const, 0}, + {"TIOCMIWAIT", Const, 0}, + {"TIOCMODG", Const, 0}, + {"TIOCMODS", Const, 0}, + {"TIOCMSDTRWAIT", Const, 0}, + {"TIOCMSET", Const, 0}, + {"TIOCM_CAR", Const, 0}, + {"TIOCM_CD", Const, 0}, + {"TIOCM_CTS", Const, 0}, + {"TIOCM_DCD", Const, 0}, + {"TIOCM_DSR", Const, 0}, + {"TIOCM_DTR", Const, 0}, + {"TIOCM_LE", Const, 0}, + {"TIOCM_RI", Const, 0}, + {"TIOCM_RNG", Const, 0}, + {"TIOCM_RTS", Const, 0}, + {"TIOCM_SR", Const, 0}, + {"TIOCM_ST", Const, 0}, + {"TIOCNOTTY", Const, 0}, + {"TIOCNXCL", Const, 0}, + {"TIOCOUTQ", Const, 0}, + {"TIOCPKT", Const, 0}, + {"TIOCPKT_DATA", Const, 0}, + {"TIOCPKT_DOSTOP", Const, 0}, + {"TIOCPKT_FLUSHREAD", Const, 0}, + {"TIOCPKT_FLUSHWRITE", Const, 0}, + {"TIOCPKT_IOCTL", Const, 0}, + {"TIOCPKT_NOSTOP", Const, 0}, + {"TIOCPKT_START", Const, 0}, + {"TIOCPKT_STOP", Const, 0}, + {"TIOCPTMASTER", Const, 0}, + {"TIOCPTMGET", Const, 1}, + {"TIOCPTSNAME", Const, 1}, + {"TIOCPTYGNAME", Const, 0}, + {"TIOCPTYGRANT", Const, 0}, + {"TIOCPTYUNLK", Const, 0}, + {"TIOCRCVFRAME", Const, 1}, + {"TIOCREMOTE", Const, 0}, + {"TIOCSBRK", Const, 0}, + {"TIOCSCONS", Const, 0}, + {"TIOCSCTTY", Const, 0}, + {"TIOCSDRAINWAIT", Const, 0}, + {"TIOCSDTR", Const, 0}, + {"TIOCSERCONFIG", Const, 0}, + {"TIOCSERGETLSR", Const, 0}, + {"TIOCSERGETMULTI", Const, 0}, + {"TIOCSERGSTRUCT", Const, 0}, + {"TIOCSERGWILD", Const, 0}, + {"TIOCSERSETMULTI", Const, 0}, + {"TIOCSERSWILD", Const, 0}, + {"TIOCSER_TEMT", Const, 0}, + {"TIOCSETA", Const, 0}, + {"TIOCSETAF", Const, 0}, + {"TIOCSETAW", Const, 0}, + {"TIOCSETD", Const, 0}, + {"TIOCSFLAGS", Const, 1}, + {"TIOCSIG", Const, 0}, + {"TIOCSLCKTRMIOS", Const, 0}, + {"TIOCSLINED", Const, 1}, + {"TIOCSPGRP", Const, 0}, + {"TIOCSPTLCK", Const, 0}, + {"TIOCSQSIZE", Const, 1}, + {"TIOCSRS485", Const, 0}, + {"TIOCSSERIAL", Const, 0}, + {"TIOCSSIZE", Const, 1}, + {"TIOCSSOFTCAR", Const, 0}, + {"TIOCSTART", Const, 0}, + {"TIOCSTAT", Const, 0}, + {"TIOCSTI", Const, 0}, + {"TIOCSTOP", Const, 0}, + {"TIOCSTSTAMP", Const, 1}, + {"TIOCSWINSZ", Const, 0}, + {"TIOCTIMESTAMP", Const, 0}, + {"TIOCUCNTL", Const, 0}, + {"TIOCVHANGUP", Const, 0}, + {"TIOCXMTFRAME", Const, 1}, + {"TOKEN_ADJUST_DEFAULT", Const, 0}, + {"TOKEN_ADJUST_GROUPS", Const, 0}, + {"TOKEN_ADJUST_PRIVILEGES", Const, 0}, + {"TOKEN_ADJUST_SESSIONID", Const, 11}, + {"TOKEN_ALL_ACCESS", Const, 0}, + {"TOKEN_ASSIGN_PRIMARY", Const, 0}, + {"TOKEN_DUPLICATE", Const, 0}, + {"TOKEN_EXECUTE", Const, 0}, + {"TOKEN_IMPERSONATE", Const, 0}, + {"TOKEN_QUERY", Const, 0}, + {"TOKEN_QUERY_SOURCE", Const, 0}, + {"TOKEN_READ", Const, 0}, + {"TOKEN_WRITE", Const, 0}, + {"TOSTOP", Const, 0}, + {"TRUNCATE_EXISTING", Const, 0}, + {"TUNATTACHFILTER", Const, 0}, + {"TUNDETACHFILTER", Const, 0}, + {"TUNGETFEATURES", Const, 0}, + {"TUNGETIFF", Const, 0}, + {"TUNGETSNDBUF", Const, 0}, + {"TUNGETVNETHDRSZ", Const, 0}, + {"TUNSETDEBUG", Const, 0}, + {"TUNSETGROUP", Const, 
0}, + {"TUNSETIFF", Const, 0}, + {"TUNSETLINK", Const, 0}, + {"TUNSETNOCSUM", Const, 0}, + {"TUNSETOFFLOAD", Const, 0}, + {"TUNSETOWNER", Const, 0}, + {"TUNSETPERSIST", Const, 0}, + {"TUNSETSNDBUF", Const, 0}, + {"TUNSETTXFILTER", Const, 0}, + {"TUNSETVNETHDRSZ", Const, 0}, + {"Tee", Func, 0}, + {"TerminateProcess", Func, 0}, + {"Termios", Type, 0}, + {"Termios.Cc", Field, 0}, + {"Termios.Cflag", Field, 0}, + {"Termios.Iflag", Field, 0}, + {"Termios.Ispeed", Field, 0}, + {"Termios.Lflag", Field, 0}, + {"Termios.Line", Field, 0}, + {"Termios.Oflag", Field, 0}, + {"Termios.Ospeed", Field, 0}, + {"Termios.Pad_cgo_0", Field, 0}, + {"Tgkill", Func, 0}, + {"Time", Func, 0}, + {"Time_t", Type, 0}, + {"Times", Func, 0}, + {"Timespec", Type, 0}, + {"Timespec.Nsec", Field, 0}, + {"Timespec.Pad_cgo_0", Field, 2}, + {"Timespec.Sec", Field, 0}, + {"TimespecToNsec", Func, 0}, + {"Timeval", Type, 0}, + {"Timeval.Pad_cgo_0", Field, 0}, + {"Timeval.Sec", Field, 0}, + {"Timeval.Usec", Field, 0}, + {"Timeval32", Type, 0}, + {"Timeval32.Sec", Field, 0}, + {"Timeval32.Usec", Field, 0}, + {"TimevalToNsec", Func, 0}, + {"Timex", Type, 0}, + {"Timex.Calcnt", Field, 0}, + {"Timex.Constant", Field, 0}, + {"Timex.Errcnt", Field, 0}, + {"Timex.Esterror", Field, 0}, + {"Timex.Freq", Field, 0}, + {"Timex.Jitcnt", Field, 0}, + {"Timex.Jitter", Field, 0}, + {"Timex.Maxerror", Field, 0}, + {"Timex.Modes", Field, 0}, + {"Timex.Offset", Field, 0}, + {"Timex.Pad_cgo_0", Field, 0}, + {"Timex.Pad_cgo_1", Field, 0}, + {"Timex.Pad_cgo_2", Field, 0}, + {"Timex.Pad_cgo_3", Field, 0}, + {"Timex.Ppsfreq", Field, 0}, + {"Timex.Precision", Field, 0}, + {"Timex.Shift", Field, 0}, + {"Timex.Stabil", Field, 0}, + {"Timex.Status", Field, 0}, + {"Timex.Stbcnt", Field, 0}, + {"Timex.Tai", Field, 0}, + {"Timex.Tick", Field, 0}, + {"Timex.Time", Field, 0}, + {"Timex.Tolerance", Field, 0}, + {"Timezoneinformation", Type, 0}, + {"Timezoneinformation.Bias", Field, 0}, + {"Timezoneinformation.DaylightBias", Field, 0}, + {"Timezoneinformation.DaylightDate", Field, 0}, + {"Timezoneinformation.DaylightName", Field, 0}, + {"Timezoneinformation.StandardBias", Field, 0}, + {"Timezoneinformation.StandardDate", Field, 0}, + {"Timezoneinformation.StandardName", Field, 0}, + {"Tms", Type, 0}, + {"Tms.Cstime", Field, 0}, + {"Tms.Cutime", Field, 0}, + {"Tms.Stime", Field, 0}, + {"Tms.Utime", Field, 0}, + {"Token", Type, 0}, + {"TokenAccessInformation", Const, 0}, + {"TokenAuditPolicy", Const, 0}, + {"TokenDefaultDacl", Const, 0}, + {"TokenElevation", Const, 0}, + {"TokenElevationType", Const, 0}, + {"TokenGroups", Const, 0}, + {"TokenGroupsAndPrivileges", Const, 0}, + {"TokenHasRestrictions", Const, 0}, + {"TokenImpersonationLevel", Const, 0}, + {"TokenIntegrityLevel", Const, 0}, + {"TokenLinkedToken", Const, 0}, + {"TokenLogonSid", Const, 0}, + {"TokenMandatoryPolicy", Const, 0}, + {"TokenOrigin", Const, 0}, + {"TokenOwner", Const, 0}, + {"TokenPrimaryGroup", Const, 0}, + {"TokenPrivileges", Const, 0}, + {"TokenRestrictedSids", Const, 0}, + {"TokenSandBoxInert", Const, 0}, + {"TokenSessionId", Const, 0}, + {"TokenSessionReference", Const, 0}, + {"TokenSource", Const, 0}, + {"TokenStatistics", Const, 0}, + {"TokenType", Const, 0}, + {"TokenUIAccess", Const, 0}, + {"TokenUser", Const, 0}, + {"TokenVirtualizationAllowed", Const, 0}, + {"TokenVirtualizationEnabled", Const, 0}, + {"Tokenprimarygroup", Type, 0}, + {"Tokenprimarygroup.PrimaryGroup", Field, 0}, + {"Tokenuser", Type, 0}, + {"Tokenuser.User", Field, 0}, + {"TranslateAccountName", Func, 0}, + 
{"TranslateName", Func, 0}, + {"TransmitFile", Func, 0}, + {"TransmitFileBuffers", Type, 0}, + {"TransmitFileBuffers.Head", Field, 0}, + {"TransmitFileBuffers.HeadLength", Field, 0}, + {"TransmitFileBuffers.Tail", Field, 0}, + {"TransmitFileBuffers.TailLength", Field, 0}, + {"Truncate", Func, 0}, + {"UNIX_PATH_MAX", Const, 12}, + {"USAGE_MATCH_TYPE_AND", Const, 0}, + {"USAGE_MATCH_TYPE_OR", Const, 0}, + {"UTF16FromString", Func, 1}, + {"UTF16PtrFromString", Func, 1}, + {"UTF16ToString", Func, 0}, + {"Ucred", Type, 0}, + {"Ucred.Gid", Field, 0}, + {"Ucred.Pid", Field, 0}, + {"Ucred.Uid", Field, 0}, + {"Umask", Func, 0}, + {"Uname", Func, 0}, + {"Undelete", Func, 0}, + {"UnixCredentials", Func, 0}, + {"UnixRights", Func, 0}, + {"Unlink", Func, 0}, + {"Unlinkat", Func, 0}, + {"UnmapViewOfFile", Func, 0}, + {"Unmount", Func, 0}, + {"Unsetenv", Func, 4}, + {"Unshare", Func, 0}, + {"UserInfo10", Type, 0}, + {"UserInfo10.Comment", Field, 0}, + {"UserInfo10.FullName", Field, 0}, + {"UserInfo10.Name", Field, 0}, + {"UserInfo10.UsrComment", Field, 0}, + {"Ustat", Func, 0}, + {"Ustat_t", Type, 0}, + {"Ustat_t.Fname", Field, 0}, + {"Ustat_t.Fpack", Field, 0}, + {"Ustat_t.Pad_cgo_0", Field, 0}, + {"Ustat_t.Pad_cgo_1", Field, 0}, + {"Ustat_t.Tfree", Field, 0}, + {"Ustat_t.Tinode", Field, 0}, + {"Utimbuf", Type, 0}, + {"Utimbuf.Actime", Field, 0}, + {"Utimbuf.Modtime", Field, 0}, + {"Utime", Func, 0}, + {"Utimes", Func, 0}, + {"UtimesNano", Func, 1}, + {"Utsname", Type, 0}, + {"Utsname.Domainname", Field, 0}, + {"Utsname.Machine", Field, 0}, + {"Utsname.Nodename", Field, 0}, + {"Utsname.Release", Field, 0}, + {"Utsname.Sysname", Field, 0}, + {"Utsname.Version", Field, 0}, + {"VDISCARD", Const, 0}, + {"VDSUSP", Const, 1}, + {"VEOF", Const, 0}, + {"VEOL", Const, 0}, + {"VEOL2", Const, 0}, + {"VERASE", Const, 0}, + {"VERASE2", Const, 1}, + {"VINTR", Const, 0}, + {"VKILL", Const, 0}, + {"VLNEXT", Const, 0}, + {"VMIN", Const, 0}, + {"VQUIT", Const, 0}, + {"VREPRINT", Const, 0}, + {"VSTART", Const, 0}, + {"VSTATUS", Const, 1}, + {"VSTOP", Const, 0}, + {"VSUSP", Const, 0}, + {"VSWTC", Const, 0}, + {"VT0", Const, 1}, + {"VT1", Const, 1}, + {"VTDLY", Const, 1}, + {"VTIME", Const, 0}, + {"VWERASE", Const, 0}, + {"VirtualLock", Func, 0}, + {"VirtualUnlock", Func, 0}, + {"WAIT_ABANDONED", Const, 0}, + {"WAIT_FAILED", Const, 0}, + {"WAIT_OBJECT_0", Const, 0}, + {"WAIT_TIMEOUT", Const, 0}, + {"WALL", Const, 0}, + {"WALLSIG", Const, 1}, + {"WALTSIG", Const, 1}, + {"WCLONE", Const, 0}, + {"WCONTINUED", Const, 0}, + {"WCOREFLAG", Const, 0}, + {"WEXITED", Const, 0}, + {"WLINUXCLONE", Const, 0}, + {"WNOHANG", Const, 0}, + {"WNOTHREAD", Const, 0}, + {"WNOWAIT", Const, 0}, + {"WNOZOMBIE", Const, 1}, + {"WOPTSCHECKED", Const, 1}, + {"WORDSIZE", Const, 0}, + {"WSABuf", Type, 0}, + {"WSABuf.Buf", Field, 0}, + {"WSABuf.Len", Field, 0}, + {"WSACleanup", Func, 0}, + {"WSADESCRIPTION_LEN", Const, 0}, + {"WSAData", Type, 0}, + {"WSAData.Description", Field, 0}, + {"WSAData.HighVersion", Field, 0}, + {"WSAData.MaxSockets", Field, 0}, + {"WSAData.MaxUdpDg", Field, 0}, + {"WSAData.SystemStatus", Field, 0}, + {"WSAData.VendorInfo", Field, 0}, + {"WSAData.Version", Field, 0}, + {"WSAEACCES", Const, 2}, + {"WSAECONNABORTED", Const, 9}, + {"WSAECONNRESET", Const, 3}, + {"WSAEnumProtocols", Func, 2}, + {"WSAID_CONNECTEX", Var, 1}, + {"WSAIoctl", Func, 0}, + {"WSAPROTOCOL_LEN", Const, 2}, + {"WSAProtocolChain", Type, 2}, + {"WSAProtocolChain.ChainEntries", Field, 2}, + {"WSAProtocolChain.ChainLen", Field, 2}, + {"WSAProtocolInfo", Type, 2}, 
+ {"WSAProtocolInfo.AddressFamily", Field, 2}, + {"WSAProtocolInfo.CatalogEntryId", Field, 2}, + {"WSAProtocolInfo.MaxSockAddr", Field, 2}, + {"WSAProtocolInfo.MessageSize", Field, 2}, + {"WSAProtocolInfo.MinSockAddr", Field, 2}, + {"WSAProtocolInfo.NetworkByteOrder", Field, 2}, + {"WSAProtocolInfo.Protocol", Field, 2}, + {"WSAProtocolInfo.ProtocolChain", Field, 2}, + {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2}, + {"WSAProtocolInfo.ProtocolName", Field, 2}, + {"WSAProtocolInfo.ProviderFlags", Field, 2}, + {"WSAProtocolInfo.ProviderId", Field, 2}, + {"WSAProtocolInfo.ProviderReserved", Field, 2}, + {"WSAProtocolInfo.SecurityScheme", Field, 2}, + {"WSAProtocolInfo.ServiceFlags1", Field, 2}, + {"WSAProtocolInfo.ServiceFlags2", Field, 2}, + {"WSAProtocolInfo.ServiceFlags3", Field, 2}, + {"WSAProtocolInfo.ServiceFlags4", Field, 2}, + {"WSAProtocolInfo.SocketType", Field, 2}, + {"WSAProtocolInfo.Version", Field, 2}, + {"WSARecv", Func, 0}, + {"WSARecvFrom", Func, 0}, + {"WSASYS_STATUS_LEN", Const, 0}, + {"WSASend", Func, 0}, + {"WSASendTo", Func, 0}, + {"WSASendto", Func, 0}, + {"WSAStartup", Func, 0}, + {"WSTOPPED", Const, 0}, + {"WTRAPPED", Const, 1}, + {"WUNTRACED", Const, 0}, + {"Wait4", Func, 0}, + {"WaitForSingleObject", Func, 0}, + {"WaitStatus", Type, 0}, + {"WaitStatus.ExitCode", Field, 0}, + {"Win32FileAttributeData", Type, 0}, + {"Win32FileAttributeData.CreationTime", Field, 0}, + {"Win32FileAttributeData.FileAttributes", Field, 0}, + {"Win32FileAttributeData.FileSizeHigh", Field, 0}, + {"Win32FileAttributeData.FileSizeLow", Field, 0}, + {"Win32FileAttributeData.LastAccessTime", Field, 0}, + {"Win32FileAttributeData.LastWriteTime", Field, 0}, + {"Win32finddata", Type, 0}, + {"Win32finddata.AlternateFileName", Field, 0}, + {"Win32finddata.CreationTime", Field, 0}, + {"Win32finddata.FileAttributes", Field, 0}, + {"Win32finddata.FileName", Field, 0}, + {"Win32finddata.FileSizeHigh", Field, 0}, + {"Win32finddata.FileSizeLow", Field, 0}, + {"Win32finddata.LastAccessTime", Field, 0}, + {"Win32finddata.LastWriteTime", Field, 0}, + {"Win32finddata.Reserved0", Field, 0}, + {"Win32finddata.Reserved1", Field, 0}, + {"Write", Func, 0}, + {"WriteConsole", Func, 1}, + {"WriteFile", Func, 0}, + {"X509_ASN_ENCODING", Const, 0}, + {"XCASE", Const, 0}, + {"XP1_CONNECTIONLESS", Const, 2}, + {"XP1_CONNECT_DATA", Const, 2}, + {"XP1_DISCONNECT_DATA", Const, 2}, + {"XP1_EXPEDITED_DATA", Const, 2}, + {"XP1_GRACEFUL_CLOSE", Const, 2}, + {"XP1_GUARANTEED_DELIVERY", Const, 2}, + {"XP1_GUARANTEED_ORDER", Const, 2}, + {"XP1_IFS_HANDLES", Const, 2}, + {"XP1_MESSAGE_ORIENTED", Const, 2}, + {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2}, + {"XP1_MULTIPOINT_DATA_PLANE", Const, 2}, + {"XP1_PARTIAL_MESSAGE", Const, 2}, + {"XP1_PSEUDO_STREAM", Const, 2}, + {"XP1_QOS_SUPPORTED", Const, 2}, + {"XP1_SAN_SUPPORT_SDP", Const, 2}, + {"XP1_SUPPORT_BROADCAST", Const, 2}, + {"XP1_SUPPORT_MULTIPOINT", Const, 2}, + {"XP1_UNI_RECV", Const, 2}, + {"XP1_UNI_SEND", Const, 2}, + }, + "syscall/js": { + {"CopyBytesToGo", Func, 0}, + {"CopyBytesToJS", Func, 0}, + {"Error", Type, 0}, + {"Func", Type, 0}, + {"FuncOf", Func, 0}, + {"Global", Func, 0}, + {"Null", Func, 0}, + {"Type", Type, 0}, + {"TypeBoolean", Const, 0}, + {"TypeFunction", Const, 0}, + {"TypeNull", Const, 0}, + {"TypeNumber", Const, 0}, + {"TypeObject", Const, 0}, + {"TypeString", Const, 0}, + {"TypeSymbol", Const, 0}, + {"TypeUndefined", Const, 0}, + {"Undefined", Func, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueOf", Func, 0}, + }, + "testing": { + 
{"(*B).Cleanup", Method, 14}, + {"(*B).Elapsed", Method, 20}, + {"(*B).Error", Method, 0}, + {"(*B).Errorf", Method, 0}, + {"(*B).Fail", Method, 0}, + {"(*B).FailNow", Method, 0}, + {"(*B).Failed", Method, 0}, + {"(*B).Fatal", Method, 0}, + {"(*B).Fatalf", Method, 0}, + {"(*B).Helper", Method, 9}, + {"(*B).Log", Method, 0}, + {"(*B).Logf", Method, 0}, + {"(*B).Name", Method, 8}, + {"(*B).ReportAllocs", Method, 1}, + {"(*B).ReportMetric", Method, 13}, + {"(*B).ResetTimer", Method, 0}, + {"(*B).Run", Method, 7}, + {"(*B).RunParallel", Method, 3}, + {"(*B).SetBytes", Method, 0}, + {"(*B).SetParallelism", Method, 3}, + {"(*B).Setenv", Method, 17}, + {"(*B).Skip", Method, 1}, + {"(*B).SkipNow", Method, 1}, + {"(*B).Skipf", Method, 1}, + {"(*B).Skipped", Method, 1}, + {"(*B).StartTimer", Method, 0}, + {"(*B).StopTimer", Method, 0}, + {"(*B).TempDir", Method, 15}, + {"(*F).Add", Method, 18}, + {"(*F).Cleanup", Method, 18}, + {"(*F).Error", Method, 18}, + {"(*F).Errorf", Method, 18}, + {"(*F).Fail", Method, 18}, + {"(*F).FailNow", Method, 18}, + {"(*F).Failed", Method, 18}, + {"(*F).Fatal", Method, 18}, + {"(*F).Fatalf", Method, 18}, + {"(*F).Fuzz", Method, 18}, + {"(*F).Helper", Method, 18}, + {"(*F).Log", Method, 18}, + {"(*F).Logf", Method, 18}, + {"(*F).Name", Method, 18}, + {"(*F).Setenv", Method, 18}, + {"(*F).Skip", Method, 18}, + {"(*F).SkipNow", Method, 18}, + {"(*F).Skipf", Method, 18}, + {"(*F).Skipped", Method, 18}, + {"(*F).TempDir", Method, 18}, + {"(*M).Run", Method, 4}, + {"(*PB).Next", Method, 3}, + {"(*T).Cleanup", Method, 14}, + {"(*T).Deadline", Method, 15}, + {"(*T).Error", Method, 0}, + {"(*T).Errorf", Method, 0}, + {"(*T).Fail", Method, 0}, + {"(*T).FailNow", Method, 0}, + {"(*T).Failed", Method, 0}, + {"(*T).Fatal", Method, 0}, + {"(*T).Fatalf", Method, 0}, + {"(*T).Helper", Method, 9}, + {"(*T).Log", Method, 0}, + {"(*T).Logf", Method, 0}, + {"(*T).Name", Method, 8}, + {"(*T).Parallel", Method, 0}, + {"(*T).Run", Method, 7}, + {"(*T).Setenv", Method, 17}, + {"(*T).Skip", Method, 1}, + {"(*T).SkipNow", Method, 1}, + {"(*T).Skipf", Method, 1}, + {"(*T).Skipped", Method, 1}, + {"(*T).TempDir", Method, 15}, + {"(BenchmarkResult).AllocedBytesPerOp", Method, 1}, + {"(BenchmarkResult).AllocsPerOp", Method, 1}, + {"(BenchmarkResult).MemString", Method, 1}, + {"(BenchmarkResult).NsPerOp", Method, 0}, + {"(BenchmarkResult).String", Method, 0}, + {"AllocsPerRun", Func, 1}, + {"B", Type, 0}, + {"B.N", Field, 0}, + {"Benchmark", Func, 0}, + {"BenchmarkResult", Type, 0}, + {"BenchmarkResult.Bytes", Field, 0}, + {"BenchmarkResult.Extra", Field, 13}, + {"BenchmarkResult.MemAllocs", Field, 1}, + {"BenchmarkResult.MemBytes", Field, 1}, + {"BenchmarkResult.N", Field, 0}, + {"BenchmarkResult.T", Field, 0}, + {"Cover", Type, 2}, + {"Cover.Blocks", Field, 2}, + {"Cover.Counters", Field, 2}, + {"Cover.CoveredPackages", Field, 2}, + {"Cover.Mode", Field, 2}, + {"CoverBlock", Type, 2}, + {"CoverBlock.Col0", Field, 2}, + {"CoverBlock.Col1", Field, 2}, + {"CoverBlock.Line0", Field, 2}, + {"CoverBlock.Line1", Field, 2}, + {"CoverBlock.Stmts", Field, 2}, + {"CoverMode", Func, 8}, + {"Coverage", Func, 4}, + {"F", Type, 18}, + {"Init", Func, 13}, + {"InternalBenchmark", Type, 0}, + {"InternalBenchmark.F", Field, 0}, + {"InternalBenchmark.Name", Field, 0}, + {"InternalExample", Type, 0}, + {"InternalExample.F", Field, 0}, + {"InternalExample.Name", Field, 0}, + {"InternalExample.Output", Field, 0}, + {"InternalExample.Unordered", Field, 7}, + {"InternalFuzzTarget", Type, 18}, + {"InternalFuzzTarget.Fn", 
Field, 18}, + {"InternalFuzzTarget.Name", Field, 18}, + {"InternalTest", Type, 0}, + {"InternalTest.F", Field, 0}, + {"InternalTest.Name", Field, 0}, + {"M", Type, 4}, + {"Main", Func, 0}, + {"MainStart", Func, 4}, + {"PB", Type, 3}, + {"RegisterCover", Func, 2}, + {"RunBenchmarks", Func, 0}, + {"RunExamples", Func, 0}, + {"RunTests", Func, 0}, + {"Short", Func, 0}, + {"T", Type, 0}, + {"TB", Type, 2}, + {"Testing", Func, 21}, + {"Verbose", Func, 1}, + }, + "testing/fstest": { + {"(MapFS).Glob", Method, 16}, + {"(MapFS).Open", Method, 16}, + {"(MapFS).ReadDir", Method, 16}, + {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).Stat", Method, 16}, + {"(MapFS).Sub", Method, 16}, + {"MapFS", Type, 16}, + {"MapFile", Type, 16}, + {"MapFile.Data", Field, 16}, + {"MapFile.ModTime", Field, 16}, + {"MapFile.Mode", Field, 16}, + {"MapFile.Sys", Field, 16}, + {"TestFS", Func, 16}, + }, + "testing/iotest": { + {"DataErrReader", Func, 0}, + {"ErrReader", Func, 16}, + {"ErrTimeout", Var, 0}, + {"HalfReader", Func, 0}, + {"NewReadLogger", Func, 0}, + {"NewWriteLogger", Func, 0}, + {"OneByteReader", Func, 0}, + {"TestReader", Func, 16}, + {"TimeoutReader", Func, 0}, + {"TruncateWriter", Func, 0}, + }, + "testing/quick": { + {"(*CheckEqualError).Error", Method, 0}, + {"(*CheckError).Error", Method, 0}, + {"(SetupError).Error", Method, 0}, + {"Check", Func, 0}, + {"CheckEqual", Func, 0}, + {"CheckEqualError", Type, 0}, + {"CheckEqualError.CheckError", Field, 0}, + {"CheckEqualError.Out1", Field, 0}, + {"CheckEqualError.Out2", Field, 0}, + {"CheckError", Type, 0}, + {"CheckError.Count", Field, 0}, + {"CheckError.In", Field, 0}, + {"Config", Type, 0}, + {"Config.MaxCount", Field, 0}, + {"Config.MaxCountScale", Field, 0}, + {"Config.Rand", Field, 0}, + {"Config.Values", Field, 0}, + {"Generator", Type, 0}, + {"SetupError", Type, 0}, + {"Value", Func, 0}, + }, + "testing/slogtest": { + {"Run", Func, 22}, + {"TestHandler", Func, 21}, + }, + "text/scanner": { + {"(*Position).IsValid", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).IsValid", Method, 0}, + {"(*Scanner).Next", Method, 0}, + {"(*Scanner).Peek", Method, 0}, + {"(*Scanner).Pos", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(*Scanner).TokenText", Method, 0}, + {"(Position).String", Method, 0}, + {"(Scanner).String", Method, 0}, + {"Char", Const, 0}, + {"Comment", Const, 0}, + {"EOF", Const, 0}, + {"Float", Const, 0}, + {"GoTokens", Const, 0}, + {"GoWhitespace", Const, 0}, + {"Ident", Const, 0}, + {"Int", Const, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"RawString", Const, 0}, + {"ScanChars", Const, 0}, + {"ScanComments", Const, 0}, + {"ScanFloats", Const, 0}, + {"ScanIdents", Const, 0}, + {"ScanInts", Const, 0}, + {"ScanRawStrings", Const, 0}, + {"ScanStrings", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.Error", Field, 0}, + {"Scanner.ErrorCount", Field, 0}, + {"Scanner.IsIdentRune", Field, 4}, + {"Scanner.Mode", Field, 0}, + {"Scanner.Position", Field, 0}, + {"Scanner.Whitespace", Field, 0}, + {"SkipComments", Const, 0}, + {"String", Const, 0}, + {"TokenString", Func, 0}, + }, + "text/tabwriter": { + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Init", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"AlignRight", Const, 0}, + {"Debug", Const, 0}, + {"DiscardEmptyColumns", Const, 0}, + {"Escape", Const, 0}, + {"FilterHTML", Const, 0}, + {"NewWriter", Func, 0}, + {"StripEscape", Const, 0}, + {"TabIndent", Const, 0}, 
+ {"Writer", Type, 0}, + }, + "text/template": { + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 5}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"(ExecError).Error", Method, 6}, + {"(ExecError).Unwrap", Method, 13}, + {"(Template).Copy", Method, 2}, + {"(Template).ErrorContext", Method, 1}, + {"ExecError", Type, 6}, + {"ExecError.Err", Field, 6}, + {"ExecError.Name", Field, 6}, + {"FuncMap", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Template", Type, 0}, + {"Template.Tree", Field, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "text/template/parse": { + {"(*ActionNode).Copy", Method, 0}, + {"(*ActionNode).String", Method, 0}, + {"(*BoolNode).Copy", Method, 0}, + {"(*BoolNode).String", Method, 0}, + {"(*BranchNode).Copy", Method, 4}, + {"(*BranchNode).String", Method, 0}, + {"(*BreakNode).Copy", Method, 18}, + {"(*BreakNode).String", Method, 18}, + {"(*ChainNode).Add", Method, 1}, + {"(*ChainNode).Copy", Method, 1}, + {"(*ChainNode).String", Method, 1}, + {"(*CommandNode).Copy", Method, 0}, + {"(*CommandNode).String", Method, 0}, + {"(*CommentNode).Copy", Method, 16}, + {"(*CommentNode).String", Method, 16}, + {"(*ContinueNode).Copy", Method, 18}, + {"(*ContinueNode).String", Method, 18}, + {"(*DotNode).Copy", Method, 0}, + {"(*DotNode).String", Method, 0}, + {"(*DotNode).Type", Method, 0}, + {"(*FieldNode).Copy", Method, 0}, + {"(*FieldNode).String", Method, 0}, + {"(*IdentifierNode).Copy", Method, 0}, + {"(*IdentifierNode).SetPos", Method, 1}, + {"(*IdentifierNode).SetTree", Method, 4}, + {"(*IdentifierNode).String", Method, 0}, + {"(*IfNode).Copy", Method, 0}, + {"(*IfNode).String", Method, 0}, + {"(*ListNode).Copy", Method, 0}, + {"(*ListNode).CopyList", Method, 0}, + {"(*ListNode).String", Method, 0}, + {"(*NilNode).Copy", Method, 1}, + {"(*NilNode).String", Method, 1}, + {"(*NilNode).Type", Method, 1}, + {"(*NumberNode).Copy", Method, 0}, + {"(*NumberNode).String", Method, 0}, + {"(*PipeNode).Copy", Method, 0}, + {"(*PipeNode).CopyPipe", Method, 0}, + {"(*PipeNode).String", Method, 0}, + {"(*RangeNode).Copy", Method, 0}, + {"(*RangeNode).String", Method, 0}, + {"(*StringNode).Copy", Method, 0}, + {"(*StringNode).String", Method, 0}, + {"(*TemplateNode).Copy", Method, 0}, + {"(*TemplateNode).String", Method, 0}, + {"(*TextNode).Copy", Method, 0}, + {"(*TextNode).String", Method, 0}, + {"(*Tree).Copy", Method, 2}, + {"(*Tree).ErrorContext", Method, 1}, + {"(*Tree).Parse", Method, 0}, + {"(*VariableNode).Copy", Method, 0}, + {"(*VariableNode).String", Method, 0}, + {"(*WithNode).Copy", Method, 0}, + {"(*WithNode).String", Method, 0}, + {"(ActionNode).Position", Method, 1}, + {"(ActionNode).Type", Method, 0}, + {"(BoolNode).Position", Method, 1}, + {"(BoolNode).Type", Method, 0}, + 
{"(BranchNode).Position", Method, 1}, + {"(BranchNode).Type", Method, 0}, + {"(BreakNode).Position", Method, 18}, + {"(BreakNode).Type", Method, 18}, + {"(ChainNode).Position", Method, 1}, + {"(ChainNode).Type", Method, 1}, + {"(CommandNode).Position", Method, 1}, + {"(CommandNode).Type", Method, 0}, + {"(CommentNode).Position", Method, 16}, + {"(CommentNode).Type", Method, 16}, + {"(ContinueNode).Position", Method, 18}, + {"(ContinueNode).Type", Method, 18}, + {"(DotNode).Position", Method, 1}, + {"(FieldNode).Position", Method, 1}, + {"(FieldNode).Type", Method, 0}, + {"(IdentifierNode).Position", Method, 1}, + {"(IdentifierNode).Type", Method, 0}, + {"(IfNode).Position", Method, 1}, + {"(IfNode).Type", Method, 0}, + {"(ListNode).Position", Method, 1}, + {"(ListNode).Type", Method, 0}, + {"(NilNode).Position", Method, 1}, + {"(NodeType).Type", Method, 0}, + {"(NumberNode).Position", Method, 1}, + {"(NumberNode).Type", Method, 0}, + {"(PipeNode).Position", Method, 1}, + {"(PipeNode).Type", Method, 0}, + {"(Pos).Position", Method, 1}, + {"(RangeNode).Position", Method, 1}, + {"(RangeNode).Type", Method, 0}, + {"(StringNode).Position", Method, 1}, + {"(StringNode).Type", Method, 0}, + {"(TemplateNode).Position", Method, 1}, + {"(TemplateNode).Type", Method, 0}, + {"(TextNode).Position", Method, 1}, + {"(TextNode).Type", Method, 0}, + {"(VariableNode).Position", Method, 1}, + {"(VariableNode).Type", Method, 0}, + {"(WithNode).Position", Method, 1}, + {"(WithNode).Type", Method, 0}, + {"ActionNode", Type, 0}, + {"ActionNode.Line", Field, 0}, + {"ActionNode.NodeType", Field, 0}, + {"ActionNode.Pipe", Field, 0}, + {"ActionNode.Pos", Field, 1}, + {"BoolNode", Type, 0}, + {"BoolNode.NodeType", Field, 0}, + {"BoolNode.Pos", Field, 1}, + {"BoolNode.True", Field, 0}, + {"BranchNode", Type, 0}, + {"BranchNode.ElseList", Field, 0}, + {"BranchNode.Line", Field, 0}, + {"BranchNode.List", Field, 0}, + {"BranchNode.NodeType", Field, 0}, + {"BranchNode.Pipe", Field, 0}, + {"BranchNode.Pos", Field, 1}, + {"BreakNode", Type, 18}, + {"BreakNode.Line", Field, 18}, + {"BreakNode.NodeType", Field, 18}, + {"BreakNode.Pos", Field, 18}, + {"ChainNode", Type, 1}, + {"ChainNode.Field", Field, 1}, + {"ChainNode.Node", Field, 1}, + {"ChainNode.NodeType", Field, 1}, + {"ChainNode.Pos", Field, 1}, + {"CommandNode", Type, 0}, + {"CommandNode.Args", Field, 0}, + {"CommandNode.NodeType", Field, 0}, + {"CommandNode.Pos", Field, 1}, + {"CommentNode", Type, 16}, + {"CommentNode.NodeType", Field, 16}, + {"CommentNode.Pos", Field, 16}, + {"CommentNode.Text", Field, 16}, + {"ContinueNode", Type, 18}, + {"ContinueNode.Line", Field, 18}, + {"ContinueNode.NodeType", Field, 18}, + {"ContinueNode.Pos", Field, 18}, + {"DotNode", Type, 0}, + {"DotNode.NodeType", Field, 4}, + {"DotNode.Pos", Field, 1}, + {"FieldNode", Type, 0}, + {"FieldNode.Ident", Field, 0}, + {"FieldNode.NodeType", Field, 0}, + {"FieldNode.Pos", Field, 1}, + {"IdentifierNode", Type, 0}, + {"IdentifierNode.Ident", Field, 0}, + {"IdentifierNode.NodeType", Field, 0}, + {"IdentifierNode.Pos", Field, 1}, + {"IfNode", Type, 0}, + {"IfNode.BranchNode", Field, 0}, + {"IsEmptyTree", Func, 0}, + {"ListNode", Type, 0}, + {"ListNode.NodeType", Field, 0}, + {"ListNode.Nodes", Field, 0}, + {"ListNode.Pos", Field, 1}, + {"Mode", Type, 16}, + {"New", Func, 0}, + {"NewIdentifier", Func, 0}, + {"NilNode", Type, 1}, + {"NilNode.NodeType", Field, 4}, + {"NilNode.Pos", Field, 1}, + {"Node", Type, 0}, + {"NodeAction", Const, 0}, + {"NodeBool", Const, 0}, + {"NodeBreak", Const, 18}, + 
{"NodeChain", Const, 1}, + {"NodeCommand", Const, 0}, + {"NodeComment", Const, 16}, + {"NodeContinue", Const, 18}, + {"NodeDot", Const, 0}, + {"NodeField", Const, 0}, + {"NodeIdentifier", Const, 0}, + {"NodeIf", Const, 0}, + {"NodeList", Const, 0}, + {"NodeNil", Const, 1}, + {"NodeNumber", Const, 0}, + {"NodePipe", Const, 0}, + {"NodeRange", Const, 0}, + {"NodeString", Const, 0}, + {"NodeTemplate", Const, 0}, + {"NodeText", Const, 0}, + {"NodeType", Type, 0}, + {"NodeVariable", Const, 0}, + {"NodeWith", Const, 0}, + {"NumberNode", Type, 0}, + {"NumberNode.Complex128", Field, 0}, + {"NumberNode.Float64", Field, 0}, + {"NumberNode.Int64", Field, 0}, + {"NumberNode.IsComplex", Field, 0}, + {"NumberNode.IsFloat", Field, 0}, + {"NumberNode.IsInt", Field, 0}, + {"NumberNode.IsUint", Field, 0}, + {"NumberNode.NodeType", Field, 0}, + {"NumberNode.Pos", Field, 1}, + {"NumberNode.Text", Field, 0}, + {"NumberNode.Uint64", Field, 0}, + {"Parse", Func, 0}, + {"ParseComments", Const, 16}, + {"PipeNode", Type, 0}, + {"PipeNode.Cmds", Field, 0}, + {"PipeNode.Decl", Field, 0}, + {"PipeNode.IsAssign", Field, 11}, + {"PipeNode.Line", Field, 0}, + {"PipeNode.NodeType", Field, 0}, + {"PipeNode.Pos", Field, 1}, + {"Pos", Type, 1}, + {"RangeNode", Type, 0}, + {"RangeNode.BranchNode", Field, 0}, + {"SkipFuncCheck", Const, 17}, + {"StringNode", Type, 0}, + {"StringNode.NodeType", Field, 0}, + {"StringNode.Pos", Field, 1}, + {"StringNode.Quoted", Field, 0}, + {"StringNode.Text", Field, 0}, + {"TemplateNode", Type, 0}, + {"TemplateNode.Line", Field, 0}, + {"TemplateNode.Name", Field, 0}, + {"TemplateNode.NodeType", Field, 0}, + {"TemplateNode.Pipe", Field, 0}, + {"TemplateNode.Pos", Field, 1}, + {"TextNode", Type, 0}, + {"TextNode.NodeType", Field, 0}, + {"TextNode.Pos", Field, 1}, + {"TextNode.Text", Field, 0}, + {"Tree", Type, 0}, + {"Tree.Mode", Field, 16}, + {"Tree.Name", Field, 0}, + {"Tree.ParseName", Field, 1}, + {"Tree.Root", Field, 0}, + {"VariableNode", Type, 0}, + {"VariableNode.Ident", Field, 0}, + {"VariableNode.NodeType", Field, 0}, + {"VariableNode.Pos", Field, 1}, + {"WithNode", Type, 0}, + {"WithNode.BranchNode", Field, 0}, + }, + "time": { + {"(*Location).String", Method, 0}, + {"(*ParseError).Error", Method, 0}, + {"(*Ticker).Reset", Method, 15}, + {"(*Ticker).Stop", Method, 0}, + {"(*Time).GobDecode", Method, 0}, + {"(*Time).UnmarshalBinary", Method, 2}, + {"(*Time).UnmarshalJSON", Method, 0}, + {"(*Time).UnmarshalText", Method, 2}, + {"(*Timer).Reset", Method, 1}, + {"(*Timer).Stop", Method, 0}, + {"(Duration).Abs", Method, 19}, + {"(Duration).Hours", Method, 0}, + {"(Duration).Microseconds", Method, 13}, + {"(Duration).Milliseconds", Method, 13}, + {"(Duration).Minutes", Method, 0}, + {"(Duration).Nanoseconds", Method, 0}, + {"(Duration).Round", Method, 9}, + {"(Duration).Seconds", Method, 0}, + {"(Duration).String", Method, 0}, + {"(Duration).Truncate", Method, 9}, + {"(Month).String", Method, 0}, + {"(Time).Add", Method, 0}, + {"(Time).AddDate", Method, 0}, + {"(Time).After", Method, 0}, + {"(Time).AppendFormat", Method, 5}, + {"(Time).Before", Method, 0}, + {"(Time).Clock", Method, 0}, + {"(Time).Compare", Method, 20}, + {"(Time).Date", Method, 0}, + {"(Time).Day", Method, 0}, + {"(Time).Equal", Method, 0}, + {"(Time).Format", Method, 0}, + {"(Time).GoString", Method, 17}, + {"(Time).GobEncode", Method, 0}, + {"(Time).Hour", Method, 0}, + {"(Time).ISOWeek", Method, 0}, + {"(Time).In", Method, 0}, + {"(Time).IsDST", Method, 17}, + {"(Time).IsZero", Method, 0}, + {"(Time).Local", Method, 0}, + 
{"(Time).Location", Method, 0}, + {"(Time).MarshalBinary", Method, 2}, + {"(Time).MarshalJSON", Method, 0}, + {"(Time).MarshalText", Method, 2}, + {"(Time).Minute", Method, 0}, + {"(Time).Month", Method, 0}, + {"(Time).Nanosecond", Method, 0}, + {"(Time).Round", Method, 1}, + {"(Time).Second", Method, 0}, + {"(Time).String", Method, 0}, + {"(Time).Sub", Method, 0}, + {"(Time).Truncate", Method, 1}, + {"(Time).UTC", Method, 0}, + {"(Time).Unix", Method, 0}, + {"(Time).UnixMicro", Method, 17}, + {"(Time).UnixMilli", Method, 17}, + {"(Time).UnixNano", Method, 0}, + {"(Time).Weekday", Method, 0}, + {"(Time).Year", Method, 0}, + {"(Time).YearDay", Method, 1}, + {"(Time).Zone", Method, 0}, + {"(Time).ZoneBounds", Method, 19}, + {"(Weekday).String", Method, 0}, + {"ANSIC", Const, 0}, + {"After", Func, 0}, + {"AfterFunc", Func, 0}, + {"April", Const, 0}, + {"August", Const, 0}, + {"Date", Func, 0}, + {"DateOnly", Const, 20}, + {"DateTime", Const, 20}, + {"December", Const, 0}, + {"Duration", Type, 0}, + {"February", Const, 0}, + {"FixedZone", Func, 0}, + {"Friday", Const, 0}, + {"Hour", Const, 0}, + {"January", Const, 0}, + {"July", Const, 0}, + {"June", Const, 0}, + {"Kitchen", Const, 0}, + {"Layout", Const, 17}, + {"LoadLocation", Func, 0}, + {"LoadLocationFromTZData", Func, 10}, + {"Local", Var, 0}, + {"Location", Type, 0}, + {"March", Const, 0}, + {"May", Const, 0}, + {"Microsecond", Const, 0}, + {"Millisecond", Const, 0}, + {"Minute", Const, 0}, + {"Monday", Const, 0}, + {"Month", Type, 0}, + {"Nanosecond", Const, 0}, + {"NewTicker", Func, 0}, + {"NewTimer", Func, 0}, + {"November", Const, 0}, + {"Now", Func, 0}, + {"October", Const, 0}, + {"Parse", Func, 0}, + {"ParseDuration", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Layout", Field, 0}, + {"ParseError.LayoutElem", Field, 0}, + {"ParseError.Message", Field, 0}, + {"ParseError.Value", Field, 0}, + {"ParseError.ValueElem", Field, 0}, + {"ParseInLocation", Func, 1}, + {"RFC1123", Const, 0}, + {"RFC1123Z", Const, 0}, + {"RFC3339", Const, 0}, + {"RFC3339Nano", Const, 0}, + {"RFC822", Const, 0}, + {"RFC822Z", Const, 0}, + {"RFC850", Const, 0}, + {"RubyDate", Const, 0}, + {"Saturday", Const, 0}, + {"Second", Const, 0}, + {"September", Const, 0}, + {"Since", Func, 0}, + {"Sleep", Func, 0}, + {"Stamp", Const, 0}, + {"StampMicro", Const, 0}, + {"StampMilli", Const, 0}, + {"StampNano", Const, 0}, + {"Sunday", Const, 0}, + {"Thursday", Const, 0}, + {"Tick", Func, 0}, + {"Ticker", Type, 0}, + {"Ticker.C", Field, 0}, + {"Time", Type, 0}, + {"TimeOnly", Const, 20}, + {"Timer", Type, 0}, + {"Timer.C", Field, 0}, + {"Tuesday", Const, 0}, + {"UTC", Var, 0}, + {"Unix", Func, 0}, + {"UnixDate", Const, 0}, + {"UnixMicro", Func, 17}, + {"UnixMilli", Func, 17}, + {"Until", Func, 8}, + {"Wednesday", Const, 0}, + {"Weekday", Type, 0}, + }, + "unicode": { + {"(SpecialCase).ToLower", Method, 0}, + {"(SpecialCase).ToTitle", Method, 0}, + {"(SpecialCase).ToUpper", Method, 0}, + {"ASCII_Hex_Digit", Var, 0}, + {"Adlam", Var, 7}, + {"Ahom", Var, 5}, + {"Anatolian_Hieroglyphs", Var, 5}, + {"Arabic", Var, 0}, + {"Armenian", Var, 0}, + {"Avestan", Var, 0}, + {"AzeriCase", Var, 0}, + {"Balinese", Var, 0}, + {"Bamum", Var, 0}, + {"Bassa_Vah", Var, 4}, + {"Batak", Var, 0}, + {"Bengali", Var, 0}, + {"Bhaiksuki", Var, 7}, + {"Bidi_Control", Var, 0}, + {"Bopomofo", Var, 0}, + {"Brahmi", Var, 0}, + {"Braille", Var, 0}, + {"Buginese", Var, 0}, + {"Buhid", Var, 0}, + {"C", Var, 0}, + {"Canadian_Aboriginal", Var, 0}, + {"Carian", Var, 0}, + {"CaseRange", Type, 0}, + 
{"CaseRange.Delta", Field, 0}, + {"CaseRange.Hi", Field, 0}, + {"CaseRange.Lo", Field, 0}, + {"CaseRanges", Var, 0}, + {"Categories", Var, 0}, + {"Caucasian_Albanian", Var, 4}, + {"Cc", Var, 0}, + {"Cf", Var, 0}, + {"Chakma", Var, 1}, + {"Cham", Var, 0}, + {"Cherokee", Var, 0}, + {"Chorasmian", Var, 16}, + {"Co", Var, 0}, + {"Common", Var, 0}, + {"Coptic", Var, 0}, + {"Cs", Var, 0}, + {"Cuneiform", Var, 0}, + {"Cypriot", Var, 0}, + {"Cypro_Minoan", Var, 21}, + {"Cyrillic", Var, 0}, + {"Dash", Var, 0}, + {"Deprecated", Var, 0}, + {"Deseret", Var, 0}, + {"Devanagari", Var, 0}, + {"Diacritic", Var, 0}, + {"Digit", Var, 0}, + {"Dives_Akuru", Var, 16}, + {"Dogra", Var, 13}, + {"Duployan", Var, 4}, + {"Egyptian_Hieroglyphs", Var, 0}, + {"Elbasan", Var, 4}, + {"Elymaic", Var, 14}, + {"Ethiopic", Var, 0}, + {"Extender", Var, 0}, + {"FoldCategory", Var, 0}, + {"FoldScript", Var, 0}, + {"Georgian", Var, 0}, + {"Glagolitic", Var, 0}, + {"Gothic", Var, 0}, + {"Grantha", Var, 4}, + {"GraphicRanges", Var, 0}, + {"Greek", Var, 0}, + {"Gujarati", Var, 0}, + {"Gunjala_Gondi", Var, 13}, + {"Gurmukhi", Var, 0}, + {"Han", Var, 0}, + {"Hangul", Var, 0}, + {"Hanifi_Rohingya", Var, 13}, + {"Hanunoo", Var, 0}, + {"Hatran", Var, 5}, + {"Hebrew", Var, 0}, + {"Hex_Digit", Var, 0}, + {"Hiragana", Var, 0}, + {"Hyphen", Var, 0}, + {"IDS_Binary_Operator", Var, 0}, + {"IDS_Trinary_Operator", Var, 0}, + {"Ideographic", Var, 0}, + {"Imperial_Aramaic", Var, 0}, + {"In", Func, 2}, + {"Inherited", Var, 0}, + {"Inscriptional_Pahlavi", Var, 0}, + {"Inscriptional_Parthian", Var, 0}, + {"Is", Func, 0}, + {"IsControl", Func, 0}, + {"IsDigit", Func, 0}, + {"IsGraphic", Func, 0}, + {"IsLetter", Func, 0}, + {"IsLower", Func, 0}, + {"IsMark", Func, 0}, + {"IsNumber", Func, 0}, + {"IsOneOf", Func, 0}, + {"IsPrint", Func, 0}, + {"IsPunct", Func, 0}, + {"IsSpace", Func, 0}, + {"IsSymbol", Func, 0}, + {"IsTitle", Func, 0}, + {"IsUpper", Func, 0}, + {"Javanese", Var, 0}, + {"Join_Control", Var, 0}, + {"Kaithi", Var, 0}, + {"Kannada", Var, 0}, + {"Katakana", Var, 0}, + {"Kawi", Var, 21}, + {"Kayah_Li", Var, 0}, + {"Kharoshthi", Var, 0}, + {"Khitan_Small_Script", Var, 16}, + {"Khmer", Var, 0}, + {"Khojki", Var, 4}, + {"Khudawadi", Var, 4}, + {"L", Var, 0}, + {"Lao", Var, 0}, + {"Latin", Var, 0}, + {"Lepcha", Var, 0}, + {"Letter", Var, 0}, + {"Limbu", Var, 0}, + {"Linear_A", Var, 4}, + {"Linear_B", Var, 0}, + {"Lisu", Var, 0}, + {"Ll", Var, 0}, + {"Lm", Var, 0}, + {"Lo", Var, 0}, + {"Logical_Order_Exception", Var, 0}, + {"Lower", Var, 0}, + {"LowerCase", Const, 0}, + {"Lt", Var, 0}, + {"Lu", Var, 0}, + {"Lycian", Var, 0}, + {"Lydian", Var, 0}, + {"M", Var, 0}, + {"Mahajani", Var, 4}, + {"Makasar", Var, 13}, + {"Malayalam", Var, 0}, + {"Mandaic", Var, 0}, + {"Manichaean", Var, 4}, + {"Marchen", Var, 7}, + {"Mark", Var, 0}, + {"Masaram_Gondi", Var, 10}, + {"MaxASCII", Const, 0}, + {"MaxCase", Const, 0}, + {"MaxLatin1", Const, 0}, + {"MaxRune", Const, 0}, + {"Mc", Var, 0}, + {"Me", Var, 0}, + {"Medefaidrin", Var, 13}, + {"Meetei_Mayek", Var, 0}, + {"Mende_Kikakui", Var, 4}, + {"Meroitic_Cursive", Var, 1}, + {"Meroitic_Hieroglyphs", Var, 1}, + {"Miao", Var, 1}, + {"Mn", Var, 0}, + {"Modi", Var, 4}, + {"Mongolian", Var, 0}, + {"Mro", Var, 4}, + {"Multani", Var, 5}, + {"Myanmar", Var, 0}, + {"N", Var, 0}, + {"Nabataean", Var, 4}, + {"Nag_Mundari", Var, 21}, + {"Nandinagari", Var, 14}, + {"Nd", Var, 0}, + {"New_Tai_Lue", Var, 0}, + {"Newa", Var, 7}, + {"Nko", Var, 0}, + {"Nl", Var, 0}, + {"No", Var, 0}, + {"Noncharacter_Code_Point", Var, 0}, + 
{"Number", Var, 0}, + {"Nushu", Var, 10}, + {"Nyiakeng_Puachue_Hmong", Var, 14}, + {"Ogham", Var, 0}, + {"Ol_Chiki", Var, 0}, + {"Old_Hungarian", Var, 5}, + {"Old_Italic", Var, 0}, + {"Old_North_Arabian", Var, 4}, + {"Old_Permic", Var, 4}, + {"Old_Persian", Var, 0}, + {"Old_Sogdian", Var, 13}, + {"Old_South_Arabian", Var, 0}, + {"Old_Turkic", Var, 0}, + {"Old_Uyghur", Var, 21}, + {"Oriya", Var, 0}, + {"Osage", Var, 7}, + {"Osmanya", Var, 0}, + {"Other", Var, 0}, + {"Other_Alphabetic", Var, 0}, + {"Other_Default_Ignorable_Code_Point", Var, 0}, + {"Other_Grapheme_Extend", Var, 0}, + {"Other_ID_Continue", Var, 0}, + {"Other_ID_Start", Var, 0}, + {"Other_Lowercase", Var, 0}, + {"Other_Math", Var, 0}, + {"Other_Uppercase", Var, 0}, + {"P", Var, 0}, + {"Pahawh_Hmong", Var, 4}, + {"Palmyrene", Var, 4}, + {"Pattern_Syntax", Var, 0}, + {"Pattern_White_Space", Var, 0}, + {"Pau_Cin_Hau", Var, 4}, + {"Pc", Var, 0}, + {"Pd", Var, 0}, + {"Pe", Var, 0}, + {"Pf", Var, 0}, + {"Phags_Pa", Var, 0}, + {"Phoenician", Var, 0}, + {"Pi", Var, 0}, + {"Po", Var, 0}, + {"Prepended_Concatenation_Mark", Var, 7}, + {"PrintRanges", Var, 0}, + {"Properties", Var, 0}, + {"Ps", Var, 0}, + {"Psalter_Pahlavi", Var, 4}, + {"Punct", Var, 0}, + {"Quotation_Mark", Var, 0}, + {"Radical", Var, 0}, + {"Range16", Type, 0}, + {"Range16.Hi", Field, 0}, + {"Range16.Lo", Field, 0}, + {"Range16.Stride", Field, 0}, + {"Range32", Type, 0}, + {"Range32.Hi", Field, 0}, + {"Range32.Lo", Field, 0}, + {"Range32.Stride", Field, 0}, + {"RangeTable", Type, 0}, + {"RangeTable.LatinOffset", Field, 1}, + {"RangeTable.R16", Field, 0}, + {"RangeTable.R32", Field, 0}, + {"Regional_Indicator", Var, 10}, + {"Rejang", Var, 0}, + {"ReplacementChar", Const, 0}, + {"Runic", Var, 0}, + {"S", Var, 0}, + {"STerm", Var, 0}, + {"Samaritan", Var, 0}, + {"Saurashtra", Var, 0}, + {"Sc", Var, 0}, + {"Scripts", Var, 0}, + {"Sentence_Terminal", Var, 7}, + {"Sharada", Var, 1}, + {"Shavian", Var, 0}, + {"Siddham", Var, 4}, + {"SignWriting", Var, 5}, + {"SimpleFold", Func, 0}, + {"Sinhala", Var, 0}, + {"Sk", Var, 0}, + {"Sm", Var, 0}, + {"So", Var, 0}, + {"Soft_Dotted", Var, 0}, + {"Sogdian", Var, 13}, + {"Sora_Sompeng", Var, 1}, + {"Soyombo", Var, 10}, + {"Space", Var, 0}, + {"SpecialCase", Type, 0}, + {"Sundanese", Var, 0}, + {"Syloti_Nagri", Var, 0}, + {"Symbol", Var, 0}, + {"Syriac", Var, 0}, + {"Tagalog", Var, 0}, + {"Tagbanwa", Var, 0}, + {"Tai_Le", Var, 0}, + {"Tai_Tham", Var, 0}, + {"Tai_Viet", Var, 0}, + {"Takri", Var, 1}, + {"Tamil", Var, 0}, + {"Tangsa", Var, 21}, + {"Tangut", Var, 7}, + {"Telugu", Var, 0}, + {"Terminal_Punctuation", Var, 0}, + {"Thaana", Var, 0}, + {"Thai", Var, 0}, + {"Tibetan", Var, 0}, + {"Tifinagh", Var, 0}, + {"Tirhuta", Var, 4}, + {"Title", Var, 0}, + {"TitleCase", Const, 0}, + {"To", Func, 0}, + {"ToLower", Func, 0}, + {"ToTitle", Func, 0}, + {"ToUpper", Func, 0}, + {"Toto", Var, 21}, + {"TurkishCase", Var, 0}, + {"Ugaritic", Var, 0}, + {"Unified_Ideograph", Var, 0}, + {"Upper", Var, 0}, + {"UpperCase", Const, 0}, + {"UpperLower", Const, 0}, + {"Vai", Var, 0}, + {"Variation_Selector", Var, 0}, + {"Version", Const, 0}, + {"Vithkuqi", Var, 21}, + {"Wancho", Var, 14}, + {"Warang_Citi", Var, 4}, + {"White_Space", Var, 0}, + {"Yezidi", Var, 16}, + {"Yi", Var, 0}, + {"Z", Var, 0}, + {"Zanabazar_Square", Var, 10}, + {"Zl", Var, 0}, + {"Zp", Var, 0}, + {"Zs", Var, 0}, + }, + "unicode/utf16": { + {"AppendRune", Func, 20}, + {"Decode", Func, 0}, + {"DecodeRune", Func, 0}, + {"Encode", Func, 0}, + {"EncodeRune", Func, 0}, + {"IsSurrogate", Func, 
0}, + }, + "unicode/utf8": { + {"AppendRune", Func, 18}, + {"DecodeLastRune", Func, 0}, + {"DecodeLastRuneInString", Func, 0}, + {"DecodeRune", Func, 0}, + {"DecodeRuneInString", Func, 0}, + {"EncodeRune", Func, 0}, + {"FullRune", Func, 0}, + {"FullRuneInString", Func, 0}, + {"MaxRune", Const, 0}, + {"RuneCount", Func, 0}, + {"RuneCountInString", Func, 0}, + {"RuneError", Const, 0}, + {"RuneLen", Func, 0}, + {"RuneSelf", Const, 0}, + {"RuneStart", Func, 0}, + {"UTFMax", Const, 0}, + {"Valid", Func, 0}, + {"ValidRune", Func, 1}, + {"ValidString", Func, 0}, + }, + "unsafe": { + {"Add", Func, 0}, + {"Alignof", Func, 0}, + {"Offsetof", Func, 0}, + {"Pointer", Type, 0}, + {"Sizeof", Func, 0}, + {"Slice", Func, 0}, + {"SliceData", Func, 0}, + {"String", Func, 0}, + {"StringData", Func, 0}, + }, +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go new file mode 100644 index 00000000..98904017 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -0,0 +1,97 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run generate.go + +// Package stdlib provides a table of all exported symbols in the +// standard library, along with the version at which they first +// appeared. +package stdlib + +import ( + "fmt" + "strings" +) + +type Symbol struct { + Name string + Kind Kind + Version Version // Go version that first included the symbol +} + +// A Kind indicates the kind of a symbol: +// function, variable, constant, type, and so on. +type Kind int8 + +const ( + Invalid Kind = iota // Example name: + Type // "Buffer" + Func // "Println" + Var // "EOF" + Const // "Pi" + Field // "Point.X" + Method // "(*Buffer).Grow" +) + +func (kind Kind) String() string { + return [...]string{ + Invalid: "invalid", + Type: "type", + Func: "func", + Var: "var", + Const: "const", + Field: "field", + Method: "method", + }[kind] +} + +// A Version represents a version of Go of the form "go1.%d". +type Version int8 + +// String returns a version string of the form "go1.23", without allocating. +func (v Version) String() string { return versions[v] } + +var versions [30]string // (increase constant as needed) + +func init() { + for i := range versions { + versions[i] = fmt.Sprintf("go1.%d", i) + } +} + +// HasPackage reports whether the specified package path is part of +// the standard library's public API. +func HasPackage(path string) bool { + _, ok := PackageSymbols[path] + return ok +} + +// SplitField splits the field symbol name into type and field +// components. It must be called only on Field symbols. +// +// Example: "File.Package" -> ("File", "Package") +func (sym *Symbol) SplitField() (typename, name string) { + if sym.Kind != Field { + panic("not a field") + } + typename, name, _ = strings.Cut(sym.Name, ".") + return +} + +// SplitMethod splits the method symbol name into pointer, receiver, +// and method components. It must be called only on Method symbols. 
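For orientation on the vendored addition above: the new stdlib package is a static index keyed by import path — PackageSymbols (the map closed out at the top of this hunk, and referenced by HasPackage) maps each path to the Symbol entries in the preceding table, while HasPackage, SplitField, and SplitMethod are thin helpers over it. A minimal sketch of querying it follows; it is illustrative only, since the package sits under internal/ in x/tools and cannot be imported from this repository, and it uses nothing beyond the identifiers shown in the diff.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	// HasPackage reports whether the path is part of the standard
	// library's public API.
	if !stdlib.HasPackage("time") {
		return
	}
	// Walk the recorded symbols and print when each one first appeared,
	// e.g. "Now func go1.0" or "(Time).Compare method go1.20".
	for _, sym := range stdlib.PackageSymbols["time"] {
		fmt.Printf("%s %s %s\n", sym.Name, sym.Kind, sym.Version)
	}
}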
+// +// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow") +func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) { + if sym.Kind != Method { + panic("not a method") + } + recv, name, _ = strings.Cut(sym.Name, ".") + recv = recv[len("(") : len(recv)-len(")")] + ptr = recv[0] == '*' + if ptr { + recv = recv[len("*"):] + } + return +} diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index 7e638ec2..ff9437a3 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -34,30 +34,16 @@ func GetLines(file *token.File) []int { lines []int _ []struct{} } - type tokenFile118 struct { - _ *token.FileSet // deleted in go1.19 - tokenFile119 - } - - type uP = unsafe.Pointer - switch unsafe.Sizeof(*file) { - case unsafe.Sizeof(tokenFile118{}): - var ptr *tokenFile118 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - case unsafe.Sizeof(tokenFile119{}): - var ptr *tokenFile119 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - - default: + if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { panic("unexpected token.File size") } + var ptr *tokenFile119 + type uP = unsafe.Pointer + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines } // AddExistingFiles adds the specified files to the FileSet if they diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go deleted file mode 100644 index d0d0649f..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typeparams contains common utilities for writing tools that interact -// with generic Go code, as introduced with Go 1.18. -// -// Many of the types and functions in this package are proxies for the new APIs -// introduced in the standard library with Go 1.18. For example, the -// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec -// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go -// versions older than 1.18 these helpers are implemented as stubs, allowing -// users of this package to write code that handles generic constructs inline, -// even if the Go version being used to compile does not support generics. -// -// Additionally, this package contains common utilities for working with the -// new generic constructs, to supplement the standard library APIs. Notably, -// the StructuralTerms API computes a minimal representation of the structural -// restrictions on a type parameter. -// -// An external version of these APIs is available in the -// golang.org/x/exp/typeparams module. -package typeparams - -import ( - "fmt" - "go/ast" - "go/token" - "go/types" -) - -// UnpackIndexExpr extracts data from AST nodes that represent index -// expressions. -// -// For an ast.IndexExpr, the resulting indices slice will contain exactly one -// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable -// number of index expressions. -// -// For nodes that don't represent index expressions, the first return value of -// UnpackIndexExpr will be nil. 
-func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { - switch e := n.(type) { - case *ast.IndexExpr: - return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: - return e.X, e.Lbrack, e.Indices, e.Rbrack - } - return nil, token.NoPos, nil, token.NoPos -} - -// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on -// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 -// will panic. -func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { - switch len(indices) { - case 0: - panic("empty indices") - case 1: - return &ast.IndexExpr{ - X: x, - Lbrack: lbrack, - Index: indices[0], - Rbrack: rbrack, - } - default: - return &IndexListExpr{ - X: x, - Lbrack: lbrack, - Indices: indices, - Rbrack: rbrack, - } - } -} - -// IsTypeParam reports whether t is a type parameter. -func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) - return ok -} - -// OriginMethod returns the origin method associated with the method fn. -// For methods on a non-generic receiver base type, this is just -// fn. However, for methods with a generic receiver, OriginMethod returns the -// corresponding method in the method set of the origin type. -// -// As a special case, if fn is not a method (has no receiver), OriginMethod -// returns fn. -func OriginMethod(fn *types.Func) *types.Func { - recv := fn.Type().(*types.Signature).Recv() - if recv == nil { - return fn - } - base := recv.Type() - p, isPtr := base.(*types.Pointer) - if isPtr { - base = p.Elem() - } - named, isNamed := base.(*types.Named) - if !isNamed { - // Receiver is a *types.Interface. - return fn - } - if ForNamed(named).Len() == 0 { - // Receiver base has no type parameters, so we can avoid the lookup below. - return fn - } - orig := NamedTypeOrigin(named) - gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) - - // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: - // package p - // type T *int - // func (*T) f() {} - // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. - // Here we make them consistent by force. - // (The go/types bug is general, but this workaround is reached only - // for generic T thanks to the early return above.) - if gfn == nil { - mset := types.NewMethodSet(types.NewPointer(orig)) - for i := 0; i < mset.Len(); i++ { - m := mset.At(i) - if m.Obj().Id() == fn.Id() { - gfn = m.Obj() - break - } - } - } - - // In golang/go#61196, we observe another crash, this time inexplicable. - if gfn == nil { - panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) - } - - return gfn.(*types.Func) -} - -// GenericAssignableTo is a generalization of types.AssignableTo that -// implements the following rule for uninstantiated generic types: -// -// If V and T are generic named types, then V is considered assignable to T if, -// for every possible instantation of V[A_1, ..., A_N], the instantiation -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. -// -// If T has structural constraints, they must be satisfied by V. 
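For orientation on the deletions in this hunk: the typeparams compatibility layer being removed describes itself as a set of proxies for the generics APIs that landed in the standard library with Go 1.18, and helpers such as UnpackIndexExpr above merely paper over the ast.IndexExpr / ast.IndexListExpr split. Below is a rough sketch of the plain go/ast pattern that makes such a shim unnecessary on go1.18+; the package name and function are hypothetical, and the migrated call sites are not part of this diff, so treat it as an assumption about how callers are typically written rather than as this change's actual replacement code.

package example

import "go/ast"

// indexParts normalizes the two AST forms of an index expression:
// *ast.IndexExpr carries a single index, while *ast.IndexListExpr
// (added in Go 1.18 for multi-argument generic instantiation) carries
// a slice of them. ok is false for any other kind of node.
func indexParts(n ast.Node) (x ast.Expr, indices []ast.Expr, ok bool) {
	switch e := n.(type) {
	case *ast.IndexExpr:
		return e.X, []ast.Expr{e.Index}, true
	case *ast.IndexListExpr:
		return e.X, e.Indices, true
	default:
		return nil, nil, false
	}
}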
-// -// For example, consider the following type declarations: -// -// type Interface[T any] interface { -// Accept(T) -// } -// -// type Container[T any] struct { -// Element T -// } -// -// func (c Container[T]) Accept(t T) { c.Element = t } -// -// In this case, GenericAssignableTo reports that instantiations of Container -// are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { - // If V and T are not both named, or do not have matching non-empty type - // parameter lists, fall back on types.AssignableTo. - - VN, Vnamed := V.(*types.Named) - TN, Tnamed := T.(*types.Named) - if !Vnamed || !Tnamed { - return types.AssignableTo(V, T) - } - - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { - return types.AssignableTo(V, T) - } - - // V and T have the same (non-zero) number of type params. Instantiate both - // with the type parameters of V. This must always succeed for V, and will - // succeed for T if and only if the type set of each type parameter of V is a - // subset of the type set of the corresponding type parameter of T, meaning - // that every instantiation of V corresponds to a valid instantiation of T. - - // Minor optimization: ensure we share a context across the two - // instantiations below. - if ctxt == nil { - ctxt = NewContext() - } - - var targs []types.Type - for i := 0; i < vtparams.Len(); i++ { - targs = append(targs, vtparams.At(i)) - } - - vinst, err := Instantiate(ctxt, V, targs, true) - if err != nil { - panic("type parameters should satisfy their own constraints") - } - - tinst, err := Instantiate(ctxt, T, targs, true) - if err != nil { - return false - } - - return types.AssignableTo(vinst, tinst) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go deleted file mode 100644 index 71248209..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "go/types" -) - -// CoreType returns the core type of T or nil if T does not have a core type. -// -// See https://go.dev/ref/spec#Core_types for the definition of a core type. -func CoreType(T types.Type) types.Type { - U := T.Underlying() - if _, ok := U.(*types.Interface); !ok { - return U // for non-interface types, - } - - terms, err := _NormalTerms(U) - if len(terms) == 0 || err != nil { - // len(terms) -> empty type set of interface. - // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. - return nil // no core type. - } - - U = terms[0].Type().Underlying() - var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) - for identical = 1; identical < len(terms); identical++ { - if !types.Identical(U, terms[identical].Type().Underlying()) { - break - } - } - - if identical == len(terms) { - // https://go.dev/ref/spec#Core_types - // "There is a single type U which is the underlying type of all types in the type set of T" - return U - } - ch, ok := U.(*types.Chan) - if !ok { - return nil // no core type as identical < len(terms) and U is not a channel. 
- } - // https://go.dev/ref/spec#Core_types - // "the type chan E if T contains only bidirectional channels, or the type chan<- E or - // <-chan E depending on the direction of the directional channels present." - for chans := identical; chans < len(terms); chans++ { - curr, ok := terms[chans].Type().Underlying().(*types.Chan) - if !ok { - return nil - } - if !types.Identical(ch.Elem(), curr.Elem()) { - return nil // channel elements are not identical. - } - if ch.Dir() == types.SendRecv { - // ch is bidirectional. We can safely always use curr's direction. - ch = curr - } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { - // ch and curr are not bidirectional and not the same direction. - return nil - } - } - return ch -} - -// _NormalTerms returns a slice of terms representing the normalized structural -// type restrictions of a type, if any. -// -// For all types other than *types.TypeParam, *types.Interface, and -// *types.Union, this is just a single term with Tilde() == false and -// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see -// below. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration type -// T[P interface{~int; m()}] int the structural restriction of the type -// parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// _NormalTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, _NormalTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the type is -// invalid, exceeds complexity bounds, or has an empty type set. In the latter -// case, _NormalTerms returns ErrEmptyTypeSet. -// -// _NormalTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { - switch typ := typ.(type) { - case *TypeParam: - return StructuralTerms(typ) - case *Union: - return UnionTermSet(typ) - case *types.Interface: - return InterfaceTermSet(typ) - default: - return []*Term{NewTerm(false, typ)}, nil - } -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d6714882..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go deleted file mode 100644 index 9c631b65..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "errors" - "fmt" - "go/types" - "os" - "strings" -) - -//go:generate go run copytermlist.go - -const debug = false - -var ErrEmptyTypeSet = errors.New("empty type set") - -// StructuralTerms returns a slice of terms representing the normalized -// structural type restrictions of a type parameter, if any. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration -// -// type T[P interface{~int; m()}] int -// -// the structural restriction of the type parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// StructuralTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, StructuralTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the -// constraint interface is invalid, exceeds complexity bounds, or has an empty -// type set. 
In the latter case, StructuralTerms returns ErrEmptyTypeSet. -// -// StructuralTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { - constraint := tparam.Constraint() - if constraint == nil { - return nil, fmt.Errorf("%s has nil constraint", tparam) - } - iface, _ := constraint.Underlying().(*types.Interface) - if iface == nil { - return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) - } - return InterfaceTermSet(iface) -} - -// InterfaceTermSet computes the normalized terms for a constraint interface, -// returning an error if the term set cannot be computed or is empty. In the -// latter case, the error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { - return computeTermSet(iface) -} - -// UnionTermSet computes the normalized terms for a union, returning an error -// if the term set cannot be computed or is empty. In the latter case, the -// error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func UnionTermSet(union *Union) ([]*Term, error) { - return computeTermSet(union) -} - -func computeTermSet(typ types.Type) ([]*Term, error) { - tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) - if err != nil { - return nil, err - } - if tset.terms.isEmpty() { - return nil, ErrEmptyTypeSet - } - if tset.terms.isAll() { - return nil, nil - } - var terms []*Term - for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) - } - return terms, nil -} - -// A termSet holds the normalized set of terms for a given type. -// -// The name termSet is intentionally distinct from 'type set': a type set is -// all types that implement a type (and includes method restrictions), whereas -// a term set just represents the structural restrictions on a type. -type termSet struct { - complete bool - terms termlist -} - -func indentf(depth int, format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) -} - -func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { - if t == nil { - panic("nil type") - } - - if debug { - indentf(depth, "%s", t.String()) - defer func() { - if err != nil { - indentf(depth, "=> %s", err) - } else { - indentf(depth, "=> %s", res.terms.String()) - } - }() - } - - const maxTermCount = 100 - if tset, ok := seen[t]; ok { - if !tset.complete { - return nil, fmt.Errorf("cycle detected in the declaration of %s", t) - } - return tset, nil - } - - // Mark the current type as seen to avoid infinite recursion. - tset := new(termSet) - defer func() { - tset.complete = true - }() - seen[t] = tset - - switch u := t.Underlying().(type) { - case *types.Interface: - // The term set of an interface is the intersection of the term sets of its - // embedded types. 
- tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { - return nil, fmt.Errorf("invalid embedded type %T", embedded) - } - tset2, err := computeTermSetInternal(embedded, seen, depth+1) - if err != nil { - return nil, err - } - tset.terms = tset.terms.intersect(tset2.terms) - } - case *Union: - // The term set of a union is the union of term sets of its terms. - tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) - var terms termlist - switch t.Type().Underlying().(type) { - case *types.Interface: - tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) - if err != nil { - return nil, err - } - terms = tset2.terms - case *TypeParam, *Union: - // A stand-alone type parameter or union is not permitted as union - // term. - return nil, fmt.Errorf("invalid union term %T", t) - default: - if t.Type() == types.Typ[types.Invalid] { - continue - } - terms = termlist{{t.Tilde(), t.Type()}} - } - tset.terms = tset.terms.union(terms) - if len(tset.terms) > maxTermCount { - return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) - } - } - case *TypeParam: - panic("unreachable") - default: - // For all other types, the term set is just a single non-tilde term - // holding the type itself. - if u != types.Typ[types.Invalid] { - tset.terms = termlist{{false, t}} - } - } - return tset, nil -} - -// under is a facade for the go/types internal function of the same name. It is -// used by typeterm.go. -func under(t types.Type) types.Type { - return t.Underlying() -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go deleted file mode 100644 index cbd12f80..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. - -package typeparams - -import ( - "bytes" - "go/types" -) - -// A termlist represents the type set represented by the union -// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. -// A termlist is in normal form if all terms are disjoint. -// termlist operations don't require the operands to be in -// normal form. -type termlist []*term - -// allTermlist represents the set of all types. -// It is in normal form. -var allTermlist = termlist{new(term)} - -// String prints the termlist exactly (without normalization). -func (xl termlist) String() string { - if len(xl) == 0 { - return "∅" - } - var buf bytes.Buffer - for i, x := range xl { - if i > 0 { - buf.WriteString(" | ") - } - buf.WriteString(x.String()) - } - return buf.String() -} - -// isEmpty reports whether the termlist xl represents the empty set of types. -func (xl termlist) isEmpty() bool { - // If there's a non-nil term, the entire list is not empty. - // If the termlist is in normal form, this requires at most - // one iteration. - for _, x := range xl { - if x != nil { - return false - } - } - return true -} - -// isAll reports whether the termlist xl represents the set of all types. -func (xl termlist) isAll() bool { - // If there's a 𝓤 term, the entire list is 𝓤. - // If the termlist is in normal form, this requires at most - // one iteration. 
- for _, x := range xl { - if x != nil && x.typ == nil { - return true - } - } - return false -} - -// norm returns the normal form of xl. -func (xl termlist) norm() termlist { - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - used := make([]bool, len(xl)) - var rl termlist - for i, xi := range xl { - if xi == nil || used[i] { - continue - } - for j := i + 1; j < len(xl); j++ { - xj := xl[j] - if xj == nil || used[j] { - continue - } - if u1, u2 := xi.union(xj); u2 == nil { - // If we encounter a 𝓤 term, the entire list is 𝓤. - // Exit early. - // (Note that this is not just an optimization; - // if we continue, we may end up with a 𝓤 term - // and other terms and the result would not be - // in normal form.) - if u1.typ == nil { - return allTermlist - } - xi = u1 - used[j] = true // xj is now unioned into xi - ignore it in future iterations - } - } - rl = append(rl, xi) - } - return rl -} - -// union returns the union xl ∪ yl. -func (xl termlist) union(yl termlist) termlist { - return append(xl, yl...).norm() -} - -// intersect returns the intersection xl ∩ yl. -func (xl termlist) intersect(yl termlist) termlist { - if xl.isEmpty() || yl.isEmpty() { - return nil - } - - // Quadratic algorithm, but good enough for now. - // TODO(gri) fix asymptotic performance - var rl termlist - for _, x := range xl { - for _, y := range yl { - if r := x.intersect(y); r != nil { - rl = append(rl, r) - } - } - } - return rl.norm() -} - -// equal reports whether xl and yl represent the same type set. -func (xl termlist) equal(yl termlist) bool { - // TODO(gri) this should be more efficient - return xl.subsetOf(yl) && yl.subsetOf(xl) -} - -// includes reports whether t ∈ xl. -func (xl termlist) includes(t types.Type) bool { - for _, x := range xl { - if x.includes(t) { - return true - } - } - return false -} - -// supersetOf reports whether y ⊆ xl. -func (xl termlist) supersetOf(y *term) bool { - for _, x := range xl { - if y.subsetOf(x) { - return true - } - } - return false -} - -// subsetOf reports whether xl ⊆ yl. -func (xl termlist) subsetOf(yl termlist) bool { - if yl.isEmpty() { - return xl.isEmpty() - } - - // each term x of xl must be a subset of yl - for _, x := range xl { - if !yl.supersetOf(x) { - return false // x is not a subset yl - } - } - return true -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index 7ed86e17..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. 
-func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. -func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) *types.Named { - return named -} - -// Term holds information about a structural type restriction. 
-type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index cf301af1..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. 
-func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) *types.Named { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go deleted file mode 100644 index 7350bb70..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by copytermlist.go DO NOT EDIT. 
- -package typeparams - -import "go/types" - -// A term describes elementary type sets: -// -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -type term struct { - tilde bool // valid if typ != nil - typ types.Type -} - -func (x *term) String() string { - switch { - case x == nil: - return "∅" - case x.typ == nil: - return "𝓤" - case x.tilde: - return "~" + x.typ.String() - default: - return x.typ.String() - } -} - -// equal reports whether x and y represent the same type set. -func (x *term) equal(y *term) bool { - // easy cases - switch { - case x == nil || y == nil: - return x == y - case x.typ == nil || y.typ == nil: - return x.typ == y.typ - } - // ∅ ⊂ x, y ⊂ 𝓤 - - return x.tilde == y.tilde && types.Identical(x.typ, y.typ) -} - -// union returns the union x ∪ y: zero, one, or two non-nil terms. -func (x *term) union(y *term) (_, _ *term) { - // easy cases - switch { - case x == nil && y == nil: - return nil, nil // ∅ ∪ ∅ == ∅ - case x == nil: - return y, nil // ∅ ∪ y == y - case y == nil: - return x, nil // x ∪ ∅ == x - case x.typ == nil: - return x, nil // 𝓤 ∪ y == 𝓤 - case y.typ == nil: - return y, nil // x ∪ 𝓤 == 𝓤 - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return x, y // x ∪ y == (x, y) if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∪ ~t == ~t - // ~t ∪ T == ~t - // T ∪ ~t == ~t - // T ∪ T == T - if x.tilde || !y.tilde { - return x, nil - } - return y, nil -} - -// intersect returns the intersection x ∩ y. -func (x *term) intersect(y *term) *term { - // easy cases - switch { - case x == nil || y == nil: - return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ - case x.typ == nil: - return y // 𝓤 ∩ y == y - case y.typ == nil: - return x // x ∩ 𝓤 == x - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return nil // x ∩ y == ∅ if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ∩ ~t == ~t - // ~t ∩ T == T - // T ∩ ~t == T - // T ∩ T == T - if !x.tilde || y.tilde { - return x - } - return y -} - -// includes reports whether t ∈ x. -func (x *term) includes(t types.Type) bool { - // easy cases - switch { - case x == nil: - return false // t ∈ ∅ == false - case x.typ == nil: - return true // t ∈ 𝓤 == true - } - // ∅ ⊂ x ⊂ 𝓤 - - u := t - if x.tilde { - u = under(u) - } - return types.Identical(x.typ, u) -} - -// subsetOf reports whether x ⊆ y. -func (x *term) subsetOf(y *term) bool { - // easy cases - switch { - case x == nil: - return true // ∅ ⊆ y == true - case y == nil: - return false // x ⊆ ∅ == false since x != ∅ - case y.typ == nil: - return true // x ⊆ 𝓤 == true - case x.typ == nil: - return false // 𝓤 ⊆ y == false since y != 𝓤 - } - // ∅ ⊂ x, y ⊂ 𝓤 - - if x.disjoint(y) { - return false // x ⊆ y == false if x ∩ y == ∅ - } - // x.typ == y.typ - - // ~t ⊆ ~t == true - // ~t ⊆ T == false - // T ⊆ ~t == true - // T ⊆ T == true - return !x.tilde || y.tilde -} - -// disjoint reports whether x ∩ y == ∅. -// x.typ and y.typ must not be nil. 
-func (x *term) disjoint(y *term) bool { - if debug && (x.typ == nil || y.typ == nil) { - panic("invalid argument(s)") - } - ux := x.typ - if y.tilde { - ux = under(ux) - } - uy := y.typ - if x.tilde { - uy = under(uy) - } - return !types.Identical(ux, uy) -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 07484073..834e0538 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -167,7 +167,7 @@ const ( UntypedNilUse // WrongAssignCount occurs when the number of values on the right-hand side - // of an assignment or or initialization expression does not match the number + // of an assignment or initialization expression does not match the number // of variables on the left-hand side. // // Example: @@ -1449,10 +1449,10 @@ const ( NotAGenericType // WrongTypeArgCount occurs when a type or function is instantiated with an - // incorrent number of type arguments, including when a generic type or + // incorrect number of type arguments, including when a generic type or // function is used without instantiation. // - // Errors inolving failed type inference are assigned other error codes. + // Errors involving failed type inference are assigned other error codes. // // Example: // type T[p any] int diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go new file mode 100644 index 00000000..fea7c8b7 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -0,0 +1,43 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// ReceiverNamed returns the named type (if any) associated with the +// type of recv, which may be of the form N or *N, or aliases thereof. +// It also reports whether a Pointer was present. +func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { + t := recv.Type() + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + isPtr = true + t = ptr.Elem() + } + named, _ = aliases.Unalias(t).(*types.Named) + return +} + +// Unpointer returns T given *T or an alias thereof. +// For all other types it is the identity function. +// It does not look at underlying types. +// The result may be an alias. +// +// Use this function to strip off the optional pointer on a receiver +// in a field or method selection, without losing the named type +// (which is needed to compute the method set). +// +// See also [typeparams.MustDeref], which removes one level of +// indirection from the type, regardless of named types (analogous to +// a LOAD instruction). +func Unpointer(t types.Type) types.Type { + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/vendor/golang.org/x/tools/internal/typesinternal/toonew.go new file mode 100644 index 00000000..cc86487e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/toonew.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// TooNewStdSymbols computes the set of package-level symbols +// exported by pkg that are not available at the specified version. +// The result maps each symbol to its minimum version. +// +// The pkg is allowed to contain type errors. +func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string { + disallowed := make(map[types.Object]string) + + // Pass 1: package-level symbols. + symbols := stdlib.PackageSymbols[pkg.Path()] + for _, sym := range symbols { + symver := sym.Version.String() + if versions.Before(version, symver) { + switch sym.Kind { + case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type: + disallowed[pkg.Scope().Lookup(sym.Name)] = symver + } + } + } + + // Pass 2: fields and methods. + // + // We allow fields and methods if their associated type is + // disallowed, as otherwise we would report false positives + // for compatibility shims. Consider: + // + // //go:build go1.22 + // type T struct { F std.Real } // correct new API + // + // //go:build !go1.22 + // type T struct { F fake } // shim + // type fake struct { ... } + // func (fake) M () {} + // + // These alternative declarations of T use either the std.Real + // type, introduced in go1.22, or a fake type, for the field + // F. (The fakery could be arbitrarily deep, involving more + // nested fields and methods than are shown here.) Clients + // that use the compatibility shim T will compile with any + // version of go, whether older or newer than go1.22, but only + // the newer version will use the std.Real implementation. + // + // Now consider a reference to method M in new(T).F.M() in a + // module that requires a minimum of go1.21. The analysis may + // occur using a version of Go higher than 1.21, selecting the + // first version of T, so the method M is Real.M. This would + // spuriously cause the analyzer to report a reference to a + // too-new symbol even though this expression compiles just + // fine (with the fake implementation) using go1.21. 
+ for _, sym := range symbols { + symVersion := sym.Version.String() + if !versions.Before(version, symVersion) { + continue // allowed + } + + var obj types.Object + switch sym.Kind { + case stdlib.Field: + typename, name := sym.SplitField() + if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name) + } + + case stdlib.Method: + ptr, recvname, name := sym.SplitMethod() + if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" { + obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name) + } + } + if obj != nil { + disallowed[obj] = symVersion + } + } + + return disallowed +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index ce7d4351..7c77c2fb 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -48,5 +48,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true } - -var SetGoVersion = func(conf *types.Config, version string) bool { return false } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go deleted file mode 100644 index a42b072a..00000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typesinternal - -import ( - "go/types" -) - -func init() { - SetGoVersion = func(conf *types.Config, version string) bool { - conf.GoVersion = version - return true - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go new file mode 100644 index 00000000..b53f1786 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// This file contains predicates for working with file versions to +// decide when a tool should consider a language feature enabled. + +// GoVersions that features in x/tools can be gated to. +const ( + Go1_18 = "go1.18" + Go1_19 = "go1.19" + Go1_20 = "go1.20" + Go1_21 = "go1.21" + Go1_22 = "go1.22" +) + +// Future is an invalid unknown Go version sometime in the future. +// Do not use directly with Compare. +const Future = "" + +// AtLeast reports whether the file version v comes after a Go release. +// +// Use this predicate to enable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func AtLeast(v, release string) bool { + if v == Future { + return true // an unknown future version is always after y. + } + return Compare(Lang(v), Lang(release)) >= 0 +} + +// Before reports whether the file version v is strictly before a Go release. +// +// Use this predicate to disable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func Before(v, release string) bool { + if v == Future { + return false // an unknown future version happens after y. 
+ } + return Compare(Lang(v), Lang(release)) < 0 +} diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go new file mode 100644 index 00000000..bbabcd22 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/gover.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a fork of internal/gover for use by x/tools until +// go1.21 and earlier are no longer supported by x/tools. + +package versions + +import "strings" + +// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]] +// The numbers are the original decimal strings to avoid integer overflows +// and since there is very little actual math. (Probably overflow doesn't matter in practice, +// but at the time this code was written, there was an existing test that used +// go1.99999999999, which does not fit in an int on 32-bit platforms. +// The "big decimal" representation avoids the problem entirely.) +type gover struct { + major string // decimal + minor string // decimal or "" + patch string // decimal or "" + kind string // "", "alpha", "beta", "rc" + pre string // decimal or "" +} + +// compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". +// Malformed versions compare less than well-formed versions and equal to each other. +// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". +func compare(x, y string) int { + vx := parse(x) + vy := parse(y) + + if c := cmpInt(vx.major, vy.major); c != 0 { + return c + } + if c := cmpInt(vx.minor, vy.minor); c != 0 { + return c + } + if c := cmpInt(vx.patch, vy.patch); c != 0 { + return c + } + if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc + return c + } + if c := cmpInt(vx.pre, vy.pre); c != 0 { + return c + } + return 0 +} + +// lang returns the Go language version. For example, lang("1.2.3") == "1.2". +func lang(x string) string { + v := parse(x) + if v.minor == "" || v.major == "1" && v.minor == "0" { + return v.major + } + return v.major + "." + v.minor +} + +// isValid reports whether the version x is valid. +func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). 
+ // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) +func cmpInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go new file mode 100644 index 00000000..377bf7a5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// toolchain is maximum version (<1.22) that the go toolchain used +// to build the current tool is known to support. +// +// When a tool is built with >=1.22, the value of toolchain is unused. +// +// x/tools does not support building with go <1.18. So we take this +// as the minimum possible maximum. +var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go new file mode 100644 index 00000000..f65beed9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package versions + +func init() { + if Compare(toolchain, Go1_19) < 0 { + toolchain = Go1_19 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go new file mode 100644 index 00000000..1a9efa12 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.20 +// +build go1.20 + +package versions + +func init() { + if Compare(toolchain, Go1_20) < 0 { + toolchain = Go1_20 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go new file mode 100644 index 00000000..b7ef216d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package versions + +func init() { + if Compare(toolchain, Go1_21) < 0 { + toolchain = Go1_21 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go new file mode 100644 index 00000000..562eef21 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "go/types" +) + +// GoVersion returns the Go version of the type package. +// It returns zero if no version can be determined. +func GoVersion(pkg *types.Package) string { + // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. + if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { + return pkg.GoVersion() + } + return "" +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go new file mode 100644 index 00000000..b4345d33 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -0,0 +1,30 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersion returns a language version (<=1.21) derived from runtime.Version() +// or an unknown future version. +func FileVersion(info *types.Info, file *ast.File) string { + // In x/tools built with Go <= 1.21, we do not have Info.FileVersions + // available. We use a go version derived from the toolchain used to + // compile the tool by default. + // This will be <= go1.21. We take this as the maximum version that + // this tool can support. + // + // There are no features currently in x/tools that need to tell fine grained + // differences for versions <1.22. + return toolchain +} + +// InitFileVersions is a noop when compiled with this Go version. +func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go new file mode 100644 index 00000000..e8180632 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -0,0 +1,41 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. 
+func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v + } + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future +} + +// InitFileVersions initializes info to record Go versions for Go files. +func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) +} diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go new file mode 100644 index 00000000..8d1f7453 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/versions.go @@ -0,0 +1,57 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "strings" +) + +// Note: If we use build tags to use go/versions when go >=1.22, +// we run into go.dev/issue/53737. Under some operations users would see an +// import of "go/versions" even if they would not compile the file. +// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include +// For this reason, this library just a clone of go/versions for the moment. + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. +// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. 
+ if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 5f28148d..bb2966e3 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -94,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -106,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. 
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field if err != nil { return err } - m.Set(fd, val) + if val.IsValid() { + m.Set(fd, val) + } return nil } @@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. } case protoreflect.EnumKind: - if v, ok := unmarshalEnum(tok, fd); ok { + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { return v, nil } @@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. @@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { return protoreflect.ValueOfEnum(enumVal.Number()), true } + if discardUnknown { + return protoreflect.Value{}, true + } case json.Number: if n, ok := tok.Int(32); ok { @@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc if err != nil { return err } - list.Append(val) + if val.IsValid() { + list.Append(val) + } } } @@ -609,8 +628,9 @@ Loop: if err != nil { return err } - - mmap.Set(pkey, pval) + if pval.IsValid() { + mmap.Set(pkey, pval) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 21d5d2cb..ae71007c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -6,6 +6,6 @@ // format. It follows the guide at // https://protobuf.dev/programming-guides/proto3#json. // -// This package produces a different output than the standard "encoding/json" +// This package produces a different output than the standard [encoding/json] // package, which does not operate correctly on protocol buffer messages. package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 66b95870..29846df2 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -25,15 +25,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in JSON format using default options. -// Do not depend on the output being stable. 
It may change over time across -// different versions of the program. +// Marshal writes the given [proto.Message] in JSON format using default options. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -81,6 +83,25 @@ type MarshalOptions struct { // ╚═══════╧════════════════════════════╝ EmitUnpopulated bool + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + // Resolver is used for looking up types when expanding google.protobuf.Any // messages. If nil, this defaults to using protoregistry.GlobalTypes. Resolver interface { @@ -91,8 +112,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -102,9 +124,10 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given proto.Message in the JSON format using options in -// MarshalOptions. Do not depend on the output being stable. It may change over -// time across different versions of the program. +// Marshal marshals the given [proto.Message] in the JSON format using options in +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } @@ -178,7 +201,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. 
-type unpopulatedFieldRanger struct{ protoreflect.Message } +type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() @@ -192,6 +219,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { @@ -217,8 +247,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - if e.opts.EmitUnpopulated { - fields = unpopulatedFieldRanger{m} + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 6c37d417..4b177c82 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { // Use another decoder to parse the unread bytes for @type field. This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,48 +308,29 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. - if err := d.skipJSONValue(); err != nil { - return err - } - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. - if err := d.skipJSONValue(); err != nil { - return err - } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. 
+ return errors.New("unexpected EOF") + } + if open == 0 { + return nil } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4..24bc98ac 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41..1f57e661 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -27,15 +27,17 @@ const defaultIndent = " " // Format formats the message as a multiline string. // This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. +// Marshal writes the given [proto.Message] in textproto format using default +// options. 
Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -97,9 +100,10 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// Marshal writes the given [proto.Message] in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686c..e942bc98 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). 
func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. 
func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1..87e46bd4 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
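// Illustrative sketch (not part of the vendored change): the accessor-name map
// removed below is replaced by methodAndName values, i.e. each accessor's
// reflect.Value is resolved once with MethodByName and then invoked directly,
// rather than being looked up by string on every call:
//
//	a := methodAndName{method: rv.MethodByName("Path"), name: "Path"}
//	if a.method.IsValid() {
//		v := a.method.Call(nil)[0] // equivalent to calling the Path() accessor
//		_ = v
//	}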
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,62 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +283,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. + record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +357,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go new file mode 100644 index 00000000..14656b65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/defaults.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
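// Illustrative note (not part of the vendored change): the //go:embed directive
// below compiles editions_defaults.binpb, a serialized
// google.protobuf.FeatureSetDefaults message, into the Defaults variable, so
// resolving edition feature defaults requires no file I/O at runtime.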
+ +// Package editiondefaults contains the binary representation of the editions +// defaults. +package editiondefaults + +import _ "embed" + +//go:embed editions_defaults.binpb +var Defaults []byte diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb new file mode 100644 index 00000000..ff6a3836 Binary files /dev/null and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go new file mode 100644 index 00000000..029a6a12 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package editionssupport defines constants for editions that are supported. +package editionssupport + +import descriptorpb "google.golang.org/protobuf/types/descriptorpb" + +const ( + Minimum = descriptorpb.Edition_EDITION_PROTO2 + Maximum = descriptorpb.Edition_EDITION_2023 +) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d043a6eb..ea1d3e65 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -121,7 +121,7 @@ func (d *Decoder) Read() (Token, error) { case ObjectClose: if len(d.openStack) == 0 || - d.lastToken.kind == comma || + d.lastToken.kind&(Name|comma) != 0 || d.openStack[len(d.openStack)-1] != ObjectOpen { return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) } @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) 
line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d2083..7e87c760 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e78..099b2bf4 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35..c2d6bd52 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
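// Illustrative note (not part of the vendored change): `any` is the Go 1.18+
// alias for `interface{}`, so these signature rewrites are purely cosmetic and
// callers compile unchanged; errors.New("got %v", 42) is still valid with the
// variadic ...any parameter.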
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689ba..df53ff40 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,6 +7,7 @@ package filedesc import ( "bytes" "fmt" + "strings" "sync" "sync/atomic" @@ -21,11 +22,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
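// Illustrative sketch (not part of the vendored change): because the Edition
// constants introduced above mirror the values of google.protobuf.Edition,
// converting from the generated descriptorpb enum is a plain cast; no mapping
// table is needed.
//
//	func editionFromProto(e descriptorpb.Edition) Edition {
//		return Edition(e) // e.g. descriptorpb.Edition_EDITION_2023 -> Edition2023 (1000)
//	}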
@@ -44,6 +60,7 @@ type ( } FileL1 struct { Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions Path string Package protoreflect.FullName @@ -51,21 +68,53 @@ type ( Messages Messages Extensions Extensions Services Services + + EditionFeatures EditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } + + EditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsFieldPresence is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the + // UnmarshalJSON([]byte) error method for enums. + GenerateLegacyUnmarshalJSON bool + } ) func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -117,6 +166,8 @@ type ( } EnumL1 struct { eagerValues bool // controls whether EnumL2.Values is already populated + + EditionFeatures EditionFeatures } EnumL2 struct { Options func() protoreflect.ProtoMessage @@ -155,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -178,6 +232,8 @@ type ( Extensions Extensions IsMapEntry bool // promoted from google.protobuf.MessageOptions IsMessageSet bool // promoted from google.protobuf.MessageOptions + + EditionFeatures EditionFeatures } MessageL2 struct { Options func() protoreflect.ProtoMessage @@ -202,14 +258,12 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from 
google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor + + EditionFeatures EditionFeatures } Oneof struct { @@ -219,6 +273,8 @@ type ( OneofL1 struct { Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + + EditionFeatures EditionFeatures } ) @@ -268,25 +324,30 @@ func (fd *Field) Options() protoreflect.ProtoMessage { } func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Kind() protoreflect.Kind { + return fd.L1.Kind +} +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + if fd.L1.Cardinality == protoreflect.Repeated { + return false + } + return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { - switch fd.L1.Kind { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - default: - return true - } + if fd.L1.Cardinality != protoreflect.Repeated { + return false } - return fd.L1.IsPacked + switch fd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } @@ -322,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} @@ -333,10 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
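// Illustrative note (not part of the vendored change): with the promoted
// HasPacked/IsPacked/HasEnforceUTF8/EnforceUTF8 booleans removed, packedness
// and UTF-8 enforcement are derived from the resolved EditionFeatures, e.g.
// under proto3 file defaults a repeated int32 field reports IsPacked() == true,
// while string, bytes, message and group kinds never do.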
func (fd *Field) EnforceUTF8() bool { - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + return fd.L1.EditionFeatures.IsUTF8Validated } func (od *Oneof) IsSynthetic() bool { @@ -359,16 +421,16 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number protoreflect.FieldNumber - Extendee protoreflect.MessageDescriptor - Cardinality protoreflect.Cardinality - Kind protoreflect.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind + EditionFeatures EditionFeatures } ExtensionL2 struct { Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -391,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } @@ -472,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} ) type ( @@ -515,6 +587,34 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. 
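// Illustrative note (not part of the vendored change): the shape checked here
// is the one a classic proto2 group produces, e.g.
//
//	optional group Result = 1 { optional string url = 2; }
//
// which declares a nested message named "Result" and a sibling field named
// "result" in the same scope; editions fields that keep DELIMITED message
// encoding match the same pattern.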
+ if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -535,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. s.nameText = string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { + if isGroupLike(fd) { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 4a1584c9..8a57d60b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -5,6 +5,7 @@ package filedesc import ( + "fmt" "sync" "google.golang.org/protobuf/encoding/protowire" @@ -98,6 +99,7 @@ func (fd *File) unmarshalSeed(b []byte) { var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int + var options []byte b0 := b for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -111,8 +113,12 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 + case "editions": + fd.L1.Syntax = protoreflect.Editions default: panic("invalid syntax") } @@ -120,6 +126,8 @@ func (fd *File) unmarshalSeed(b []byte) { fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_Options_field_number: + options = v case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -154,6 +162,13 @@ func (fd *File) unmarshalSeed(b []byte) { numServices++ } prevField = num + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Edition_field_number: + fd.L1.Edition = Edition(v) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -164,6 +179,14 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. 
if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 + } + + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) + + // Parse editions features from options if any + if options != nil { + fd.unmarshalSeedOptions(options) } // Must allocate all declarations before parsing each descriptor type @@ -219,10 +242,33 @@ func (fd *File) unmarshalSeed(b []byte) { } } +func (fd *File) unmarshalSeedOptions(b []byte) { + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileOptions_Features_field_number: + if fd.Syntax() != protoreflect.Editions { + panic(fmt.Sprintf("invalid descriptor: using edition features in a proto with syntax %s", fd.Syntax())) + } + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -275,6 +321,7 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i + md.L1.EditionFeatures = featuresFromParentDesc(md.Parent()) var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int @@ -380,6 +427,13 @@ func (md *Message) unmarshalSeedOptions(b []byte) { case genid.MessageOptions_MessageSetWireFormat_field_number: md.L1.IsMessageSet = protowire.DecodeBool(v) } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MessageOptions_Features_field_number: + md.L1.EditionFeatures = unmarshalFeatureSet(v, md.L1.EditionFeatures) + } default: m := protowire.ConsumeFieldValue(num, typ, b) b = b[m:] @@ -391,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -415,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -447,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb 
*strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 736a19a7..e56c91a8 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. @@ -414,6 +419,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i + fd.L1.EditionFeatures = featuresFromParentDesc(fd.Parent()) var rawTypeName []byte var rawOptions []byte @@ -465,6 +471,12 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } + if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + fd.L1.Kind = protoreflect.GroupKind + } + if fd.L1.EditionFeatures.IsLegacyRequired { + fd.L1.Cardinality = protoreflect.Required + } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { @@ -489,13 +501,18 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -557,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -577,25 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff 
--git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 30db19fd..f4107c05 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,6 +8,7 @@ package filedesc import ( "fmt" + "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) + if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go new file mode 100644 index 00000000..11f5f356 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -0,0 +1,156 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} + +func init() { + unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) +} + +func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num)) + } + } + return parent +} + +func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSet_FieldPresence_field_number: + parent.IsFieldPresence = v == genid.FeatureSet_EXPLICIT_enum_value || v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + parent.IsLegacyRequired = v == genid.FeatureSet_LEGACY_REQUIRED_enum_value + case genid.FeatureSet_EnumType_field_number: + parent.IsOpenEnum = v == genid.FeatureSet_OPEN_enum_value + case genid.FeatureSet_RepeatedFieldEncoding_field_number: + parent.IsPacked = v == genid.FeatureSet_PACKED_enum_value + case genid.FeatureSet_Utf8Validation_field_number: + parent.IsUTF8Validated = v == genid.FeatureSet_VERIFY_enum_value + case genid.FeatureSet_MessageEncoding_field_number: + parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value + case genid.FeatureSet_JsonFormat_field_number: + parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + 
default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + parent = unmarshalGoFeature(v, parent) + } + } + } + + return parent +} + +func featuresFromParentDesc(parentDesc protoreflect.Descriptor) EditionFeatures { + var parentFS EditionFeatures + switch p := parentDesc.(type) { + case *File: + parentFS = p.L1.EditionFeatures + case *Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + return parentFS +} + +func unmarshalEditionDefault(b []byte) { + var ed Edition + var fs EditionFeatures + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number: + ed = Edition(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + } + } + } + defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) +} + +func unmarshalEditionDefaults(b []byte) { + for len(b) > 0 { + num, _, n := protowire.ConsumeTag(b) + b = b[n:] + switch num { + case genid.FeatureSetDefaults_Defaults_field_number: + def, m := protowire.ConsumeBytes(b) + b = b[m:] + unmarshalEditionDefault(def) + case genid.FeatureSetDefaults_MinimumEdition_field_number, + genid.FeatureSetDefaults_MaximumEdition_field_number: + // We don't care about the minimum and maximum editions. If the + // edition we are looking for later on is not in the cache we know + // it is outside of the range between minimum and maximum edition. 
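// Illustrative note (not part of the vendored change): getFeaturesFor below
// depends on exactly this range behavior: it keeps the last embedded edition
// that is not newer than the requested one and panics only when the requested
// edition predates every embedded default. Assuming the embedded defaults
// carry entries for proto2, proto3 and 2023:
//
//	getFeaturesFor(EditionProto3) // proto3 defaults (999)
//	getFeaturesFor(Edition2023)   // edition 2023 defaults (1000)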
+ _, m := protowire.ConsumeVarint(b) + b = b[m:] + default: + panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num)) + } + } +} + +func getFeaturesFor(ed Edition) EditionFeatures { + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } + match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) + } + return defaultsCache[match] +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index 28240ebc..bfb3b841 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4e..ba83fea4 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 136f1b21..f30ab6b5 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,28 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + +// Enum values for google.protobuf.Edition. +const ( + Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 + Edition_EDITION_PROTO2_enum_value = 998 + Edition_EDITION_PROTO3_enum_value = 999 + Edition_EDITION_2023_enum_value = 1000 + Edition_EDITION_2024_enum_value = 1001 + Edition_EDITION_1_TEST_ONLY_enum_value = 1 + Edition_EDITION_2_TEST_ONLY_enum_value = 2 + Edition_EDITION_99997_TEST_ONLY_enum_value = 99997 + Edition_EDITION_99998_TEST_ONLY_enum_value = 99998 + Edition_EDITION_99999_TEST_ONLY_enum_value = 99999 + Edition_EDITION_MAX_enum_value = 2147483647 +) + // Names for google.protobuf.FileDescriptorSet. 
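// Illustrative note (not part of the vendored change): these generated
// constants are what the hand-rolled parsers in internal/filedesc switch on;
// for example, scanning FileOptions for the new features field looks like:
//
//	case genid.FileOptions_Features_field_number: // field 50
//		fd.L1.EditionFeatures = unmarshalFeatureSet(v, fd.L1.EditionFeatures)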
const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -81,7 +103,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto. @@ -184,10 +206,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +219,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -204,6 +229,12 @@ const ( ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" ) +// Enum values for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_DECLARATION_enum_value = 0 + ExtensionRangeOptions_UNVERIFIED_enum_value = 1 +) + // Names for google.protobuf.ExtensionRangeOptions.Declaration. const ( ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" @@ -212,29 +243,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. 
const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -291,12 +319,41 @@ const ( FieldDescriptorProto_Type_enum_name = "Type" ) +// Enum values for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_TYPE_DOUBLE_enum_value = 1 + FieldDescriptorProto_TYPE_FLOAT_enum_value = 2 + FieldDescriptorProto_TYPE_INT64_enum_value = 3 + FieldDescriptorProto_TYPE_UINT64_enum_value = 4 + FieldDescriptorProto_TYPE_INT32_enum_value = 5 + FieldDescriptorProto_TYPE_FIXED64_enum_value = 6 + FieldDescriptorProto_TYPE_FIXED32_enum_value = 7 + FieldDescriptorProto_TYPE_BOOL_enum_value = 8 + FieldDescriptorProto_TYPE_STRING_enum_value = 9 + FieldDescriptorProto_TYPE_GROUP_enum_value = 10 + FieldDescriptorProto_TYPE_MESSAGE_enum_value = 11 + FieldDescriptorProto_TYPE_BYTES_enum_value = 12 + FieldDescriptorProto_TYPE_UINT32_enum_value = 13 + FieldDescriptorProto_TYPE_ENUM_enum_value = 14 + FieldDescriptorProto_TYPE_SFIXED32_enum_value = 15 + FieldDescriptorProto_TYPE_SFIXED64_enum_value = 16 + FieldDescriptorProto_TYPE_SINT32_enum_value = 17 + FieldDescriptorProto_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.FieldDescriptorProto.Label. const ( FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" FieldDescriptorProto_Label_enum_name = "Label" ) +// Enum values for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_LABEL_OPTIONAL_enum_value = 1 + FieldDescriptorProto_LABEL_REPEATED_enum_value = 3 + FieldDescriptorProto_LABEL_REQUIRED_enum_value = 2 +) + // Names for google.protobuf.OneofDescriptorProto. 
const ( OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" @@ -468,7 +525,6 @@ const ( FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" @@ -478,6 +534,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -490,7 +547,6 @@ const ( FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" @@ -500,6 +556,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -515,7 +572,6 @@ const ( FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 @@ -525,6 +581,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -534,6 
+591,13 @@ const ( FileOptions_OptimizeMode_enum_name = "OptimizeMode" ) +// Enum values for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_SPEED_enum_value = 1 + FileOptions_CODE_SIZE_enum_value = 2 + FileOptions_LITE_RUNTIME_enum_value = 3 +) + // Names for google.protobuf.MessageOptions. const ( MessageOptions_message_name protoreflect.Name = "MessageOptions" @@ -547,6 +611,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +619,7 @@ const ( MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +630,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +651,10 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +666,10 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + 
FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -613,8 +684,10 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -624,24 +697,107 @@ const ( FieldOptions_CType_enum_name = "CType" ) +// Enum values for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_STRING_enum_value = 0 + FieldOptions_CORD_enum_value = 1 + FieldOptions_STRING_PIECE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.JSType. const ( FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" FieldOptions_JSType_enum_name = "JSType" ) +// Enum values for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JS_NORMAL_enum_value = 0 + FieldOptions_JS_STRING_enum_value = 1 + FieldOptions_JS_NUMBER_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionRetention. const ( FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" FieldOptions_OptionRetention_enum_name = "OptionRetention" ) +// Enum values for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_RETENTION_UNKNOWN_enum_value = 0 + FieldOptions_RETENTION_RUNTIME_enum_value = 1 + FieldOptions_RETENTION_SOURCE_enum_value = 2 +) + // Full and short names for google.protobuf.FieldOptions.OptionTargetType. const ( FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Enum values for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_TARGET_TYPE_UNKNOWN_enum_value = 0 + FieldOptions_TARGET_TYPE_FILE_enum_value = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE_enum_value = 2 + FieldOptions_TARGET_TYPE_MESSAGE_enum_value = 3 + FieldOptions_TARGET_TYPE_FIELD_enum_value = 4 + FieldOptions_TARGET_TYPE_ONEOF_enum_value = 5 + FieldOptions_TARGET_TYPE_ENUM_enum_value = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY_enum_value = 7 + FieldOptions_TARGET_TYPE_SERVICE_enum_value = 8 + FieldOptions_TARGET_TYPE_METHOD_enum_value = 9 +) + +// Names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. 
+const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +806,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. 
const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +830,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +845,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +858,24 @@ const ( // Field names for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +887,18 @@ const ( // Field names for google.protobuf.ServiceOptions. 
const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +913,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +926,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -759,6 +936,13 @@ const ( MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" ) +// Enum values for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN_enum_value = 0 + MethodOptions_NO_SIDE_EFFECTS_enum_value = 1 + MethodOptions_IDEMPOTENT_enum_value = 2 +) + // Names for google.protobuf.UninterpretedOption. const ( UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" @@ -816,6 +1000,166 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. +const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Enum values for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN_enum_value = 0 + FeatureSet_EXPLICIT_enum_value = 1 + FeatureSet_IMPLICIT_enum_value = 2 + FeatureSet_LEGACY_REQUIRED_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Enum values for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_ENUM_TYPE_UNKNOWN_enum_value = 0 + FeatureSet_OPEN_enum_value = 1 + FeatureSet_CLOSED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Enum values for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_PACKED_enum_value = 1 + FeatureSet_EXPANDED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Enum values for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN_enum_value = 0 + FeatureSet_VERIFY_enum_value = 2 + FeatureSet_NONE_enum_value = 3 +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. 
+const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Enum values for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN_enum_value = 0 + FeatureSet_LENGTH_PREFIXED_enum_value = 1 + FeatureSet_DELIMITED_enum_value = 2 +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Enum values for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JSON_FORMAT_UNKNOWN_enum_value = 0 + FeatureSet_ALLOW_enum_value = 1 + FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. 
+const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 +) + // Names for google.protobuf.SourceCodeInfo. const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" @@ -917,3 +1261,10 @@ const ( GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) + +// Enum values for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_NONE_enum_value = 0 + GeneratedCodeInfo_Annotation_SET_enum_value = 1 + GeneratedCodeInfo_Annotation_ALIAS_enum_value = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go new file mode 100644 index 00000000..9a652a2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" + +// Names for google.protobuf.GoFeatures. +const ( + GoFeatures_message_name protoreflect.Name = "GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" +) + +// Field names for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" +) + +// Field numbers for google.protobuf.GoFeatures. +const ( + GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go index 1a38944b..ad6f80c4 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -18,6 +18,11 @@ const ( NullValue_enum_name = "NullValue" ) +// Enum values for google.protobuf.NullValue. +const ( + NullValue_NULL_VALUE_enum_value = 0 +) + // Names for google.protobuf.Struct. const ( Struct_message_name protoreflect.Name = "Struct" diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index e0f75fea..49bc73e2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -18,6 +18,13 @@ const ( Syntax_enum_name = "Syntax" ) +// Enum values for google.protobuf.Syntax. +const ( + Syntax_SYNTAX_PROTO2_enum_value = 0 + Syntax_SYNTAX_PROTO3_enum_value = 1 + Syntax_SYNTAX_EDITIONS_enum_value = 2 +) + // Names for google.protobuf.Type. const ( Type_message_name protoreflect.Name = "Type" @@ -105,12 +112,43 @@ const ( Field_Kind_enum_name = "Kind" ) +// Enum values for google.protobuf.Field.Kind. 
+const ( + Field_TYPE_UNKNOWN_enum_value = 0 + Field_TYPE_DOUBLE_enum_value = 1 + Field_TYPE_FLOAT_enum_value = 2 + Field_TYPE_INT64_enum_value = 3 + Field_TYPE_UINT64_enum_value = 4 + Field_TYPE_INT32_enum_value = 5 + Field_TYPE_FIXED64_enum_value = 6 + Field_TYPE_FIXED32_enum_value = 7 + Field_TYPE_BOOL_enum_value = 8 + Field_TYPE_STRING_enum_value = 9 + Field_TYPE_GROUP_enum_value = 10 + Field_TYPE_MESSAGE_enum_value = 11 + Field_TYPE_BYTES_enum_value = 12 + Field_TYPE_UINT32_enum_value = 13 + Field_TYPE_ENUM_enum_value = 14 + Field_TYPE_SFIXED32_enum_value = 15 + Field_TYPE_SFIXED64_enum_value = 16 + Field_TYPE_SINT32_enum_value = 17 + Field_TYPE_SINT64_enum_value = 18 +) + // Full and short names for google.protobuf.Field.Cardinality. const ( Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" Field_Cardinality_enum_name = "Cardinality" ) +// Enum values for google.protobuf.Field.Cardinality. +const ( + Field_CARDINALITY_UNKNOWN_enum_value = 0 + Field_CARDINALITY_OPTIONAL_enum_value = 1 + Field_CARDINALITY_REQUIRED_enum_value = 2 + Field_CARDINALITY_REPEATED_enum_value = 3 +) + // Names for google.protobuf.Enum. const ( Enum_message_name protoreflect.Name = "Enum" diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98d..5d5771c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. 
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041ed..f29e6a8f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index e74cefdc..4bb0a7a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -21,26 +21,18 @@ type extensionFieldInfo struct { validation validationInfo } -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e + // Ideally we'd cache the resulting *extensionFieldInfo so we don't have to + // recompute this metadata repeatedly. But without support for something like + // weak references, such a cache would pin temporary values (like dynamic + // extension types, constructed for the duration of a user request) to the + // heap forever, causing memory usage of the cache to grow unbounded. + // See discussion in https://github.com/golang/protobuf/issues/1521. + return makeExtensionFieldInfo(xt.TypeDescriptor()) } func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { @@ -107,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a look first, then only take the lock if needed. 
+ if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 3fadd241..78ee47e4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return nil 
} -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) + before := len(b) + b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } @@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } @@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63..f55dc01e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() 
if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for 
len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 111b9d16..fb35f0ba 100644 --- 
a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,6 +9,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = protowire.AppendVarint(b, uint64(size)) + before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf..7a16ec13 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) 
+ b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index 576dcf3a..13077751 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -197,7 +197,7 @@ func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: + case !fd.HasPresence() && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. switch fd.Kind() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2ef..e06ece55 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f8913651..18cb96fd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a05..304244a6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6..febd2122 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) 
+ continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0ba..e31249f6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c2a803bb..81b2b1a7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
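
Reviewer note on the internal/impl hunks above: the upgrade changes several runtime behaviors — lazy extensions now stay in wire form during Size/Marshal unless deterministic marshaling is requested (`fullyLazyExtensions` plus `lazyBuffer`), the sizecache field stores size+1 so a cached size of 0 is distinguishable from "not cached", the append paths compare the precomputed size against the bytes actually written and return `errors.MismatchedSizeCalculation` on a mismatch, and packed scalar decoding pre-grows the destination slice by pre-counting elements (varint terminator bytes, or payload length divided by the fixed width). The `interface{}` → `any` and `io/ioutil` → `io` rewrites also mean the vendored module assumes a Go 1.18+ toolchain.

As a minimal, self-contained sketch of the packed-varint pre-count used in the consume*Slice hunks (hypothetical helper names; the real code lives in the generated coders and uses protowire):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodePackedUvarints decodes a packed varint payload into a []uint64,
// sizing the result up front the same way the consume*Slice hunks do.
func decodePackedUvarints(b []byte) ([]uint64, error) {
	// Every varint ends with a byte whose high bit is clear, so counting
	// bytes < 0x80 yields the element count without decoding anything.
	count := 0
	for _, v := range b {
		if v < 0x80 {
			count++
		}
	}
	out := make([]uint64, 0, count)

	for len(b) > 0 {
		v, n := binary.Uvarint(b)
		if n <= 0 {
			return nil, fmt.Errorf("malformed varint")
		}
		out = append(out, v)
		b = b[n:]
	}
	return out, nil
}

func main() {
	var buf []byte
	for _, v := range []uint64{1, 150, 300} {
		buf = binary.AppendUvarint(buf, v)
	}
	vals, err := decodePackedUvarints(buf)
	fmt.Println(vals, err) // [1 150 300] <nil>
}
```
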
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 87b30d05..6e8677ee 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked + xd.L1.EditionFeatures = fd.L1.EditionFeatures xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index 9ab09108..b649f112 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io/ioutil" + "io" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := ioutil.ReadAll(zr) + b2, err := io.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fa..bf0b6049 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,15 +204,21 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]any); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } @@ -245,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -255,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -302,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.HasPacked { + if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } return opts.Interface() } @@ -339,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { @@ -558,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a..019399d4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. 
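
Here, and again in message.go below, the lookup of the XXX_OneofFuncs / XXX_OneofWrappers methods is rewritten from a loop over a []string of method names into two calls with string literals. A plausible motivation, not stated in the diff itself, is that calling reflect's MethodByName with a non-constant argument can disable the Go linker's dead-code elimination, whereas literal-string calls keep it effective. The sketch below only illustrates the literal-argument pattern; the `widget` type and its method are hypothetical.

```go
package main

import (
	"fmt"
	"reflect"
)

type widget struct{}

func (widget) XXX_OneofWrappers() []any { return []any{"a", "b"} }

func main() {
	t := reflect.TypeOf(widget{})

	// Look up each method with a literal name (never a variable), keeping only
	// the ones that exist, then invoke them on a zero receiver.
	var methods []reflect.Method
	if m, ok := t.MethodByName("XXX_OneofFuncs"); ok {
		methods = append(methods, m)
	}
	if m, ok := t.MethodByName("XXX_OneofWrappers"); ok {
		methods = append(methods, m)
	}
	for _, fn := range methods {
		out := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})
		fmt.Println(out[0].Interface()) // [a b]
	}
}
```
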
It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]any); ok { + oneofWrappers = vs } } } @@ -251,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index d9ea010b..ecb4623d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } } } -func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if m == nil { return false } - xd := xt.TypeDescriptor() x, ok := (*m)[int32(xd.Number())] if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. 
+ return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 - case xd.Message() != nil: - return x.Value().Message().IsValid() } return true } -func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { - delete(*m, int32(xt.TypeDescriptor().Number())) +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { + delete(*m, int32(xd.Number())) } -func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } } - return xt.Zero() + return xd.Type().Zero() } -func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { - xd := xt.TypeDescriptor() +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { + xt := xd.Type() isValid := true switch { case !xt.IsValidValue(v): @@ -292,7 +292,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) isValid = v.Message().IsValid() } if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) } if *m == nil { @@ -302,16 +302,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } - v := xt.New() - m.Set(xt, v) + v := xd.Type().New() + m.Set(xd, v) return v } @@ -394,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -422,13 +421,13 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
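
The extensionMap methods above now take a protoreflect.ExtensionTypeDescriptor instead of an ExtensionType, and Has returns true without decoding when the extension is still in wire form, so a presence check no longer forces a lazy unmarshal. The two views are interconvertible through the public protoreflect API, as the sketch below shows; gofeaturespb.E_Go (the Go features extension on google.protobuf.FeatureSet) is used here only as a convenient generated extension shipped with the module, any extension type works.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/gofeaturespb"
)

// describeExtension shows the round trip between the two views used above:
// an ExtensionType yields its ExtensionTypeDescriptor via TypeDescriptor(),
// and the descriptor hands the type back via Type() (as Get/Mutable now do).
func describeExtension(xt protoreflect.ExtensionType) {
	xd := xt.TypeDescriptor()
	fmt.Println(xd.FullName(), xd.Number(), xd.ContainingMessage().FullName())
	_ = xd.Type()
}

func main() {
	describeExtension(gofeaturespb.E_Go)
}
```
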
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -457,7 +456,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd.Type() + return nil, xtd } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 5e736c60..986322b1 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -538,6 +538,6 @@ func isZero(v reflect.Value) bool { } return true default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 741d6e5b..99dc23c6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { 
fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if 
ri.has(m.pointer()) { @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := 
m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf..da685e8a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. + in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573..5f20ca5d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. 
return p.AsValueOf(t).Interface() } @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e..a1f09162 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go index 0b74e765..a6e7df24 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -17,7 +17,7 @@ import ( // EnforceUTF8 reports whether to enforce strict UTF-8 validation. func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { + if flags.ProtoLegacy || fd.Syntax() == protoreflect.Editions { if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { return fd.EnforceUTF8() } diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 61a84d34..a008acd0 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 00000000..60166f2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. 
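
Both pointer implementations gain grow*Slice helpers that extend a slice's capacity without changing its length, so repeated-field decoding can reserve room up front; the purego variant goes through reflect, the unsafe variant re-slices manually as shown above. A standalone sketch of the same manual pattern, assuming nothing from the internal pointer type:

```go
package main

import "fmt"

// growInt32 returns s with at least addCap extra capacity, preserving its
// length, mirroring the growInt32Slice helper above.
func growInt32(s []int32, addCap int) []int32 {
	out := make([]int32, len(s), len(s)+addCap)
	copy(out, s)
	return out
}

func main() {
	s := []int32{1, 2, 3}
	s = growInt32(s, 100)
	fmt.Println(len(s), cap(s)) // 3 103
}
```
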
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d..dbbf1f68 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 31 - Patch = 0 + Minor = 34 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946..d75a6534 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,6 +51,8 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. 
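
The new strings_unsafe_go121.go above replaces the pre-Go-1.21 pointer-header tricks with unsafe.String/unsafe.SliceData and unsafe.Slice/unsafe.StringData. A minimal demonstration of the same zero-copy conversions follows; exactly as the WARNING comments say, the results must be treated as immutable.

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	b := []byte("hello")

	// []byte -> string without copying: valid only while b is not mutated.
	s := unsafe.String(unsafe.SliceData(b), len(b))

	// string -> []byte without copying: the result must never be written to.
	b2 := unsafe.Slice(unsafe.StringData(s), len(s))

	fmt.Println(s, string(b2)) // hello hello
}
```
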
func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err @@ -69,7 +71,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717..80ed16a0 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. @@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. 
// // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. +// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d..1f847bcc 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -70,7 +75,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. 
+// +// This is a less common entry point than [Marshal], which is only needed if you +// need to supply your own buffers for performance reasons. func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -129,7 +162,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } @@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda..d248f292 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,22 +11,25 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special-case, we reports invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) @@ -36,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. 
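
MarshalOptions now centralizes the translation of its user-facing fields into protoiface.MarshalInputFlags via the new flags() helper (and size.go further down passes the same flags when sizing), and marshal errors caused by a size/encode mismatch are wrapped with the message name. From the public API the options are used as below; note that Deterministic also disables the lazy-extension pass-through introduced earlier, since cached wire bytes may not match deterministic output.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// structpb.Struct is backed by a map, so deterministic marshaling is the
	// simplest way to get byte-stable output across runs.
	msg, err := structpb.NewStruct(map[string]any{"b": 1, "a": 2})
	if err != nil {
		panic(err)
	}

	opts := proto.MarshalOptions{Deterministic: true}
	out, err := opts.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out), proto.Size(msg)) // the same value twice
}
```
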
if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -48,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -75,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. if m == nil { return diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab33..3c6fe578 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45..575d1483 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b..7543ee6b 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. 
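
HasExtension above now treats a nil message or nil extension type as simply "not populated", and the extension accessors trade interface{} for any. Public usage is unchanged; the sketch below again borrows gofeaturespb.E_Go and its GoFeatures payload as a stand-in for any generated extension.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	// A nil message or nil extension type now just reports "not populated".
	fmt.Println(proto.HasExtension(nil, gofeaturespb.E_Go)) // false

	fs := &descriptorpb.FeatureSet{}
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // false

	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{})
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // true

	// GetExtension returns any (formerly interface{}); assert to the Go type.
	_ = proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
}
```
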
-// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49..052fb5ae 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,6 +34,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb120..8fbecb4f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,16 +3,17 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc import ( + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" @@ -24,11 +25,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +62,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. 
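
The Message documentation above now points at google.golang.org/protobuf/protoadapt instead of the legacy github.com/golang/protobuf shims for converting between the v1 and v2 message interfaces. A minimal illustration using a well-known type; for messages generated by current protoc-gen-go both interfaces are already satisfied, so the conversion only matters when an API is typed against the old interfaces.

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(90 * time.Second)

	// Down-convert to the v1 interface (Reset/String/ProtoMessage) and back.
	v1 := protoadapt.MessageV1Of(d)
	v2 := protoadapt.MessageV2Of(v1)

	fmt.Println(v2.ProtoReflect().Descriptor().FullName()) // google.protobuf.Duration
}
```
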
+// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -91,11 +92,19 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot switch fd.GetSyntax() { case "proto2", "": f.L1.Syntax = protoreflect.Proto2 + f.L1.Edition = filedesc.EditionProto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + f.L1.Edition = filedesc.EditionProto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +117,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -210,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { return nil, err } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { return nil, err } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { return nil, err } @@ -231,7 +241,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. 
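
FileOptions.New above now accepts syntax "editions", maps proto2/proto3 onto sentinel editions internally, and rejects editions outside the supported range (editionssupport.Minimum..Maximum). Assuming that range includes Edition 2023 in this release, a descriptor using editions should be constructible as sketched below; an out-of-range edition would instead return the "not yet supported" error added above.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Syntax:  proto.String("editions"),
		Edition: descriptorpb.Edition_EDITION_2023.Enum(),
	}

	// No dependencies, so a nil Resolver is sufficient here.
	fd, err := protodesc.NewFile(fdp, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Path(), fd.Syntax()) // example.proto editions
}
```
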
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1a..85617554 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -28,6 +28,7 @@ func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProt opts = proto.Clone(opts).(*descriptorpb.EnumOptions) e.L2.Options = func() protoreflect.ProtoMessage { return opts } } + e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) for _, s := range ed.GetReservedName() { e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) } @@ -68,6 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { return nil, err } + m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) if opts := md.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.MessageOptions) m.L2.Options = func() protoreflect.ProtoMessage { return opts } @@ -114,6 +116,27 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt return ms, nil } +// canBePacked returns whether the field can use packed encoding: +// https://protobuf.dev/programming-guides/encoding/#packed +func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { + if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + return false // not a repeated field + } + + switch protoreflect.Kind(fd.GetType()) { + case protoreflect.MessageKind, protoreflect.GroupKind: + return false // not a scalar type field + + case protoreflect.StringKind, protoreflect.BytesKind: + // string and bytes can explicitly not be declared as packed, + // see https://protobuf.dev/programming-guides/encoding/#packed + return false + + default: + return true + } +} + func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers for i, fd := range fds { @@ -121,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { return nil, err } + f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) f.L1.IsProto3Optional = fd.GetProto3Optional() if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() + if opts.Packed != nil { + f.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) @@ -137,6 +162,14 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.L1.EditionFeatures.IsLegacyRequired { + f.L1.Cardinality = protoreflect.Required + } + + if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded { + f.L1.Kind = protoreflect.GroupKind + } } return fs, nil } @@ -148,6 +181,7 @@ func (r descsByName) initOneofsFromDescriptorProto(ods 
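
In desc_init.go below, packedness moves onto EditionFeatures, with an explicit [packed] option only overriding the edition default, and the new canBePacked helper encodes the rule that only repeated scalar (non-string, non-bytes, non-message) fields may use packed encoding. Through the public API this surfaces as FieldDescriptor.IsPacked; for instance, proto2 repeated scalars default to expanded encoding:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	md := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()

	// public_dependency is `repeated int32` in the proto2 descriptor.proto
	// and carries no [packed = true] option, so it stays expanded.
	fd := md.Fields().ByName("public_dependency")
	fmt.Println(fd.IsList(), fd.IsPacked()) // true false

	// dependency is `repeated string`: never packable, regardless of options.
	fmt.Println(md.Fields().ByName("dependency").IsPacked()) // false
}
```
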
[]*descriptorpb.OneofDesc if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { return nil, err } + o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) if opts := od.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.OneofOptions) o.L1.Options = func() protoreflect.ProtoMessage { return opts } @@ -164,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { return nil, err } + x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) if opts := xd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() + if opts.Packed != nil { + x.L1.EditionFeatures.IsPacked = opts.GetPacked() + } } x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 27d7e350..f3cebab2 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) 
+ f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { @@ -276,8 +281,8 @@ func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvab } else if err != nil { return v, ev, err } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") + if !fd.HasPresence() { + return v, ev, errors.New("cannot be specified with implicit field presence") } if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { return v, ev, errors.New("cannot be specified on composite types") diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 9af1d564..6de31c2e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri if allowAlias && !foundAlias { return errors.New("enum %q allows aliases, but none were found", e.FullName()) } - if e.Syntax() == protoreflect.Proto3 { + if !e.IsClosed() { if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) } - // Verify that value names in proto3 do not conflict if the + // Verify that value names in open enums do not conflict if the // case-insensitive prefix is removed. // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 names := map[string]protoreflect.EnumValueDescriptor{} @@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri v1 := e.Values().Get(i) s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) } names[s] = v1 } @@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri return nil } -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { +func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + // There are a few limited exceptions only for proto3 + isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) for i, md := range mds { m := &ms[i] @@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if isMessageSet && !flags.ProtoLegacy { return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) } - if m.Syntax() == protoreflect.Proto3 { + if isProto3 { if m.ExtensionRanges().Len() > 0 { 
return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. - // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { @@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) } if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { + if !isProto3 { return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) } if f.Cardinality() != protoreflect.Optional { @@ -162,26 +152,29 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if f.IsWeak() && !flags.ProtoLegacy { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { return errors.New("message field %q may only be weak for an optional message", f.FullName()) } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } - if err := checkValidGroup(f); err != nil { + if err := checkValidGroup(file, f); err != nil { return errors.New("message field %q is an invalid group: %v", f.FullName(), err) } if err := checkValidMap(f); err != nil { return errors.New("message field %q is an invalid map: %v", f.FullName(), err) } - if f.Syntax() == protoreflect.Proto3 { + if isProto3 { if f.Cardinality() == protoreflect.Required { return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) } } + if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { + return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) + } } seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs for j := range md.GetOneofDecl() { @@ -215,17 +208,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { return err } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { return err } - if err := validateExtensionDeclarations(m.L1.Extensions.List, 
md.GetExtension()); err != nil { + if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { return err } } return nil } -func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { +func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { for i, xd := range xds { x := &xs[i] // NOTE: Avoid using the IsValid method since extensions to MessageSet @@ -267,13 +260,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb. if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } - if err := checkValidGroup(x); err != nil { + if err := checkValidGroup(f, x); err != nil { return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) } if md := x.Message(); md != nil && md.IsMapEntry() { return errors.New("extension field %q cannot be a map entry", x.FullName()) } - if x.Syntax() == protoreflect.Proto3 { + if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { switch x.ContainingMessage().FullName() { case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): @@ -309,21 +302,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool { // checkValidGroup reports whether fd is a valid group according to the same // rules that protoc imposes. -func checkValidGroup(fd protoreflect.FieldDescriptor) error { +func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { md := fd.Message() switch { case fd.Kind() != protoreflect.GroupKind: return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") + case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): + return errors.New("invalid under proto3 semantics") case md == nil || md.IsPlaceholder(): return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") + } + if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { + switch { + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } } return nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 00000000..804830ed --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,145 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protodesc + +import ( + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/editiondefaults" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" +) + +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(editiondefaults.Defaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fsed := defaults.GetDefaults()[0] + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search. + for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fsed = def + } else { + break + } + } + fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) + proto.Merge(fs, fsed.GetOverridableFeatures()) + defaultsCache[ed] = fs + return fs +} + +// mergeEditionFeatures merges the parent and child feature sets. This function +// should be used when initializing Go descriptors from descriptor protos which +// is why the parent is a filedesc.EditionsFeatures (Go representation) while +// the child is a descriptorproto.FeatureSet (protoc representation). +// Any feature set by the child overwrites what is set by the parent. 
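Editor's illustration (not part of the vendored file): the merge rule documented above, where any feature explicitly set on the child overrides the inherited parent value, can be observed at the descriptorpb level with proto.Clone and proto.Merge, since mergeEditionFeatures itself is unexported. The concrete feature choices below are arbitrary examples.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Parent (e.g. file-level) features.
	parent := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_EXPLICIT.Enum(),
		EnumType:      descriptorpb.FeatureSet_OPEN.Enum(),
	}
	// Child (e.g. field-level) features override only what they explicitly set.
	child := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_IMPLICIT.Enum(),
	}

	merged := proto.Clone(parent).(*descriptorpb.FeatureSet)
	proto.Merge(merged, child)

	fmt.Println(merged.GetFieldPresence()) // IMPLICIT: the child wins
	fmt.Println(merged.GetEnumType())      // OPEN: inherited from the parent
}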
+func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { + var parentFS filedesc.EditionFeatures + switch p := parentDesc.(type) { + case *filedesc.File: + parentFS = p.L1.EditionFeatures + case *filedesc.Message: + parentFS = p.L1.EditionFeatures + default: + panic(fmt.Sprintf("unknown parent type %T", parentDesc)) + } + if child == nil { + return parentFS + } + if fp := child.FieldPresence; fp != nil { + parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || + *fp == descriptorpb.FeatureSet_EXPLICIT + parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED + } + if et := child.EnumType; et != nil { + parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN + } + + if rfe := child.RepeatedFieldEncoding; rfe != nil { + parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED + } + + if utf8val := child.Utf8Validation; utf8val != nil { + parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY + } + + if me := child.MessageEncoding; me != nil { + parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED + } + + if jf := child.JsonFormat; jf != nil { + parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW + } + + if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { + if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { + parentFS.GenerateLegacyUnmarshalJSON = *luje + } + } + + return parentFS +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. +func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + // initialize the featureset with the defaults + fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) + // overwrite any options explicitly specified + fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a7c5ceff..a5de8d40 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a // google.protobuf.FileDescriptorProto message. 
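Editor's illustration (not part of the vendored file): ToFileDescriptorProto and protodesc.NewFile form a round trip between Go descriptors and descriptor protos, and with this change editions files also carry their Edition value through the conversion. A minimal sketch; descriptor.proto is used only because its descriptor is conveniently available, and the registry resolver is one reasonable choice among several.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Any generated file descriptor works; descriptor.proto is used here.
	fd := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor().ParentFile()

	// Convert the Go descriptor into its proto form.
	fdp := protodesc.ToFileDescriptorProto(fd)
	fmt.Println(fdp.GetName()) // google/protobuf/descriptor.proto

	// Rebuild a protoreflect.FileDescriptor from the proto form.
	back, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Path(), back.Syntax()) // google/protobuf/descriptor.proto proto2
}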
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,23 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } + if file.Syntax() == protoreflect.Editions { + desc := file + if fileImportDesc, ok := file.(protoreflect.FileImport); ok { + desc = fileImportDesc.FileDescriptor + } + + if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { + p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() + } + } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +129,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { p.Proto3Optional = proto.Bool(true) } + if field.Syntax() == protoreflect.Editions { + // Editions have no group keyword, this type is only set so that downstream users continue + // treating this as delimited encoding. + if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { + p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() + } + // Editions have no required keyword, this label is only set so that downstream users continue + // treating it as required. + if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { + p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() + } + } if field.HasDefault() { def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) if err != nil && field.DefaultEnumValue() != nil { @@ -168,7 +190,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +199,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. 
func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +222,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +232,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +245,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa1492..c85bfaa5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. 
// -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. // // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┐ // │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. 
// -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. +// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. // This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -151,14 +153,15 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false @@ -172,6 +175,8 @@ func (s Syntax) String() string { return "proto2" case Proto3: return "proto3" + case Editions: + return "editions" default: return fmt.Sprintf("", s) } @@ -436,7 +441,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. 
type FullName string // e.g., "google.protobuf.Field.Kind" @@ -480,7 +485,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f..ea154eec 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -160,8 +160,6 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "java_generic_services", nil) case 18: b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) case 23: b = p.appendSingularField(b, "deprecated", nil) case 31: @@ -180,6 +178,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +240,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +287,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +334,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +367,41 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } 
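Editor's illustration (not part of the vendored file): with the new case 50 in appendFileOptions and the appendFeatureSet helper added just below, a source path that descends into the features option now renders symbolically via SourcePath.String. A minimal sketch; the specific path is chosen for illustration.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// 8 = FileDescriptorProto.options, 50 = FileOptions.features,
	// 1 = FeatureSet.field_presence
	p := protoreflect.SourcePath{8, 50, 1}
	fmt.Println(p.String()) // .options.features.field_presence
}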
+func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +453,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +481,12 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +502,44 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +564,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d..cd8fadba 100644 --- 
a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. // // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. 
// It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,20 +519,20 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. 
- IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. +// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -544,11 +544,17 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. + // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. + IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +616,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b78..a7b0d06f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. -// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. 
-// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 59165254..654599d4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. // -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. 
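Editor's illustration (not part of the vendored file): the equality rules listed above can be exercised directly on scalar Values. A short sketch of two of them, NaN handling and type-sensitive comparison.

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Unlike the == operator, two NaN values compare equal under Value.Equal.
	a := protoreflect.ValueOfFloat64(math.NaN())
	b := protoreflect.ValueOfFloat64(math.NaN())
	fmt.Println(a.Equal(b)) // true

	// Values backed by different Go types are never equal, even when numerically identical.
	fmt.Println(protoreflect.ValueOfInt32(1).Equal(protoreflect.ValueOfInt64(1))) // false
}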
func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f..75f83a2a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73..9fe83cef 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,12 +65,12 @@ import ( // // appending to the List here may or may not modify the message. 
// list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). 
+// The following shows what Go type is used to represent each proto [Kind]: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey { // ║ string │ StringKind ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value]. func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 93% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 702ddf22..7f3583ea 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. 
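An aside on the Value and MapKey documentation rewritten above: the union accepts only the Go types listed in its table, and MapKey is the further-restricted subset usable as map keys. A minimal usage sketch built on the exported constructors (the literal values are arbitrary, chosen only for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Scalars go in and out of the union through typed constructors and accessors.
	v := protoreflect.ValueOfInt64(42)
	fmt.Println(v.Int(), v.IsValid()) // 42 true

	// ValueOf panics for Go types outside the union (e.g. a plain int),
	// which is why the typed ValueOf* helpers are usually preferred.
	s := protoreflect.ValueOf("hash")

	// Only bool, int32/int64, uint32/uint64, and string values may become MapKeys.
	k := s.MapKey()
	fmt.Println(k.String())            // hash
	fmt.Println(k.Value().Interface()) // hash, back through Value to any
}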
-func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 00000000..f7d38699 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t any) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v any) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x any) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb55977..de177733 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. package protoregistry @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. - descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. 
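The go1.21 variant added above stores only a data pointer plus a length for strings and byte slices and reassembles them with unsafe.String and unsafe.Slice. A standalone sketch of that zero-copy round trip, using only the standard unsafe helpers available since Go 1.20 rather than the vendored internals:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Split a string into (data pointer, length) and rebuild it, mirroring
	// how the value union keeps ptr+num and getString() reconstructs the string.
	s := "hello"
	s2 := unsafe.String(unsafe.StringData(s), len(s))
	fmt.Println(s2 == s) // true, and no copy was made

	// Same idea for []byte via SliceData/Slice.
	b := []byte{1, 2, 3}
	b2 := unsafe.Slice(unsafe.SliceData(b), len(b))
	fmt.Println(&b[0] == &b2[0]) // true: both views share one backing array
}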
func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up a extension field by the field's full name. // Note that this is the full name of the field as determined by @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. 
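The Files and Types registries described above back the package-level protoregistry.GlobalFiles and protoregistry.GlobalTypes, which generated code populates at init time. A small lookup sketch; the blank import of descriptorpb is only there so google/protobuf/descriptor.proto is guaranteed to be registered:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	_ "google.golang.org/protobuf/types/descriptorpb" // registers descriptor.proto in the global registries
)

func main() {
	// Descriptor-only lookup: Files knows nothing about Go types.
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("google.protobuf.FileOptions")
	if err != nil {
		panic(err) // would be protoregistry.NotFound if the file were not registered
	}
	fmt.Println(d.FullName())

	// Go-type-aware lookup: Types returns a MessageType that can construct messages.
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.FileOptions")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName())
}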
func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E // FindExtensionByNumber looks up a extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 04c00f73..9403eb07 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,108 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // A placeholder edition for specifying default behaviors *before* a feature + // was first introduced. This is effectively an "infinite past". + Edition_EDITION_LEGACY Edition = 900 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + Edition_EDITION_2024 Edition = 1001 + // Placeholder editions for testing feature resolution. These should not be + // used or relyed on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 + // Placeholder for specifying unbounded edition support. This should only + // ever be used by plugins that can expect to never require any changes to + // support a new edition. + Edition_EDITION_MAX Edition = 2147483647 +) + +// Enum value maps for Edition. 
+var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 900: "EDITION_LEGACY", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1001: "EDITION_2024", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + 2147483647: "EDITION_MAX", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_LEGACY": 900, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_2024": 1001, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + "EDITION_MAX": 2147483647, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -80,11 +182,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -125,9 +227,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after google.protobuf. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. 
@@ -195,11 +298,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -226,21 +329,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in google.protobuf. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -255,11 +361,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -316,11 +422,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -382,11 +488,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -444,11 +550,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return 
&file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +612,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +696,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +758,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +784,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 + FeatureSet_NONE FeatureSet_Utf8Validation = 3 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 2: "VERIFY", + 3: "NONE", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "VERIFY": 2, + "NONE": 3, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1179,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1202,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1285,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. 
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1405,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1542,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1605,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1186,12 +1657,12 @@ type FieldDescriptorProto struct { // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. + // When proto3_optional is true, this field must belong to a oneof to signal + // to old proto3 clients that presence is tracked for this field. This oneof + // is known as a "synthetic" oneof, and this field must be its sole member + // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + // exist in the descriptor only, and do not generate any API. Synthetic oneofs + // must be ordered after all "real" oneofs. 
// // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still @@ -1711,12 +2182,16 @@ type FileOptions struct { // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. + // A proto2 file can set this to true to opt in to UTF-8 checking for Java, + // which will throw an exception if invalid UTF-8 is parsed from the wire or + // assigned to a string field. + // + // TODO: clarify exactly what kinds of field types this option + // applies to, and update these docs accordingly. + // + // Proto3 files already perform these checks. Setting the option explicitly to + // false has no effect: it cannot be used to opt proto3 files out of UTF-8 + // checks. JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` // Sets the Go package where structs generated from this .proto will be @@ -1738,7 +2213,6 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -1772,6 +2246,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1785,7 +2261,6 @@ const ( Default_FileOptions_CcGenericServices = bool(false) Default_FileOptions_JavaGenericServices = bool(false) Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) Default_FileOptions_Deprecated = bool(false) Default_FileOptions_CcEnableArenas = bool(true) ) @@ -1893,13 +2368,6 @@ func (x *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - func (x *FileOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1963,6 +2431,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2006,10 +2481,6 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - // // Whether the message is an automatically generated map entry type for the // maps field. // @@ -2030,6 +2501,10 @@ type MessageOptions struct { // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // Enable the legacy handling of JSON field name conflicts. This lowercases // and strips underscored from the fields before comparison in proto3 only. @@ -2039,11 +2514,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. // - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2600,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2631,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2178,19 +2664,11 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - // - // As of May 2022, lazy verifies the contents of the byte stream during - // parsing. An invalid byte stream will cause the overall parsing to fail. + // Note that lazy message fields are still eagerly verified to check + // ill-formed wireformat or missing required fields. Calling IsInitialized() + // on the outer message would fail if the inner message has missing required + // fields. Failed verification would result in parsing failure (except when + // uninitialized messages are acceptable). Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` // unverified_lazy does no correctness checks on the byte stream. This should // only be used where lazy with verification is prohibitive for performance @@ -2205,11 +2683,13 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
- Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,17 +2800,30 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets } - return FieldOptions_TARGET_TYPE_UNKNOWN + return nil } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { if x != nil { - return x.Targets + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport } return nil } @@ -2348,6 +2841,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2879,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2911,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. 
- // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2981,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +3006,22 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3063,27 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3097,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3145,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3178,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3236,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2794,6 +3353,171 @@ func (x *UninterpretedOption) GetAggregateValue() string { return "" } +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
+type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. +func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. 
The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { + if x != nil { + return x.Defaults + } + return nil +} + +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN +} + // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { @@ -2855,7 +3579,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3592,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3605,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. 
func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3631,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3644,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3657,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3680,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3693,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3745,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3758,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3802,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
- IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. @@ -3094,7 +3814,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3827,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3864,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3896,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3909,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3939,143 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
+} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. +func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + +// Information about the support window of a feature. +type FieldOptions_FeatureSupport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The edition that this feature was first available in. In editions + // earlier than this one, the default assigned to EDITION_LEGACY will be + // used, and proto files will not be able to override it. + EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` + // The edition this feature becomes deprecated in. Using this after this + // edition may trigger warnings. + EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` + // The deprecation warning text if this feature is used after the edition it + // was marked deprecated in. + DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` + // The edition this feature is no longer available in. In editions after + // this one, the last default assigned will be used, and proto files will + // not be able to override it. 
+ EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` +} + +func (x *FieldOptions_FeatureSupport) Reset() { + *x = FieldOptions_FeatureSupport{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_FeatureSupport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_FeatureSupport) ProtoMessage() {} + +func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. +func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { + if x != nil && x.EditionIntroduced != nil { + return *x.EditionIntroduced + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { + if x != nil && x.EditionDeprecated != nil { + return *x.EditionDeprecated + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { + if x != nil && x.DeprecationWarning != nil { + return *x.DeprecationWarning + } + return "" +} + +func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { + if x != nil && x.EditionRemoved != nil { + return *x.EditionRemoved + } + return Edition_EDITION_UNKNOWN +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +4093,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +4106,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4136,75 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. 
+type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + // Defaults of features that can be overridden in this edition. + OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` + // Defaults of features that can't be overridden in this edition. + FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. +func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { + if x != nil { + return x.OverridableFeatures + } + return nil +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { + if x != nil { + return x.FixedFeatures + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3296,7 +4214,7 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition occurs. + // the root FileDescriptorProto to the place where the definition appears. 
// For example, this path: // // [ 4, 3, 2, 7, 1 ] @@ -3388,7 +4306,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4319,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4332,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4393,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4406,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4419,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4468,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,250 +4506,250 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 
0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 
0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, + 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 
0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, + 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 
0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, + 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, + 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, + 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 
0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, - 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, - 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, - 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, - 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x6a, 0x73, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, - 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, - 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, - 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, - 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, - 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, - 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, + 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, + 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, - 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, - 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, - 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, - 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, - 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 
0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, - 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, + 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, + 
0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, + 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, + 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, + 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, + 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, @@ -3856,88 +4774,130 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, + 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, + 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 
0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, - 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 
0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, - 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, - 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 
0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, - 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, - 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, @@ -3967,14 +4927,18 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, @@ -3985,130 +4949,284 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 
0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, - 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, - 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, - 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, - 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 
0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 
0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 
0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 
0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 
0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -4123,103 +5241,143 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - 
(*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_google_protobuf_descriptor_proto_goTypes = []any{ + (Edition)(0), // 0: google.protobuf.Edition + (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + 
(*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> 
google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: 
google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: 
google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: 
google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4228,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -4240,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -4252,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -4264,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -4278,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -4290,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -4302,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i 
int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -4314,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -4326,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -4338,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -4350,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -4364,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -4378,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -4392,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -4406,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -4420,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*EnumValueOptions); i { case 0: return &v.state @@ -4434,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i 
int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -4448,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -4462,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -4474,7 +5632,33 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSetDefaults); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -4486,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -4498,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -4510,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -4522,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ -4534,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -4546,7 +5730,31 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - 
file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { + switch v := v.(*FieldOptions_FeatureSupport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -4558,7 +5766,19 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -4570,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5808,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 33, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go index f77ef0de..1ba1dfa5 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -49,12 +49,13 @@ type extensionType struct { // A Message is a dynamically constructed protocol buffer message. // -// Message implements the proto.Message interface, and may be used with all -// standard proto package functions such as Marshal, Unmarshal, and so forth. +// Message implements the [google.golang.org/protobuf/proto.Message] interface, +// and may be used with all standard proto package functions +// such as Marshal, Unmarshal, and so forth. // -// Message also implements the protoreflect.Message interface. See the protoreflect -// package documentation for that interface for how to get and set fields and -// otherwise interact with the contents of a Message. +// Message also implements the [protoreflect.Message] interface. +// See the [protoreflect] package documentation for that interface for how to +// get and set fields and otherwise interact with the contents of a Message. // // Reflection API functions which construct messages, such as NewField, // return new dynamic messages of the appropriate type. 
Functions which take @@ -87,7 +88,7 @@ func NewMessage(desc protoreflect.MessageDescriptor) *Message { // ProtoMessage implements the legacy message interface. func (m *Message) ProtoMessage() {} -// ProtoReflect implements the protoreflect.ProtoMessage interface. +// ProtoReflect implements the [protoreflect.ProtoMessage] interface. func (m *Message) ProtoReflect() protoreflect.Message { return m } @@ -115,25 +116,25 @@ func (m *Message) Type() protoreflect.MessageType { } // New returns a newly allocated empty message with the same descriptor. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) New() protoreflect.Message { return m.Type().New() } // Interface returns the message. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Interface() protoreflect.ProtoMessage { return m } -// ProtoMethods is an internal detail of the protoreflect.Message interface. +// ProtoMethods is an internal detail of the [protoreflect.Message] interface. // Users should never call this directly. func (m *Message) ProtoMethods() *protoiface.Methods { return nil } // Range visits every populated field in undefined order. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { for num, v := range m.known { fd := m.ext[num] @@ -150,7 +151,7 @@ func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) } // Has reports whether a field is populated. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { m.checkField(fd) if fd.IsExtension() && m.ext[fd.Number()] != fd { @@ -164,7 +165,7 @@ func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { } // Clear clears a field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Clear(fd protoreflect.FieldDescriptor) { m.checkField(fd) num := fd.Number() @@ -173,7 +174,7 @@ func (m *Message) Clear(fd protoreflect.FieldDescriptor) { } // Get returns the value of a field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) num := fd.Number() @@ -212,7 +213,7 @@ func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { } // Mutable returns a mutable reference to a repeated, map, or message field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) if !fd.IsMap() && !fd.IsList() && fd.Message() == nil { @@ -241,7 +242,7 @@ func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { } // Set stores a value in a field. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { m.checkField(fd) if m.known == nil { @@ -284,7 +285,7 @@ func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) { } // NewField returns a new value for assignable to the field of a given descriptor. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. 
func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { m.checkField(fd) switch { @@ -293,7 +294,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { case fd.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: fd, - mapv: make(map[interface{}]protoreflect.Value), + mapv: make(map[any]protoreflect.Value), }) case fd.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: fd}) @@ -305,7 +306,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { } // WhichOneof reports which field in a oneof is populated, returning nil if none are populated. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { for i := 0; i < od.Fields().Len(); i++ { fd := od.Fields().Get(i) @@ -317,13 +318,13 @@ func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.Field } // GetUnknown returns the raw unknown fields. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) GetUnknown() protoreflect.RawFields { return m.unknown } // SetUnknown sets the raw unknown fields. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) SetUnknown(r protoreflect.RawFields) { if m.known == nil { panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName())) @@ -332,7 +333,7 @@ func (m *Message) SetUnknown(r protoreflect.RawFields) { } // IsValid reports whether the message is valid. -// See protoreflect.Message for details. +// See [protoreflect.Message] for details. func (m *Message) IsValid() bool { return m.known != nil } @@ -449,7 +450,7 @@ func (x *dynamicList) IsValid() bool { type dynamicMap struct { desc protoreflect.FieldDescriptor - mapv map[interface{}]protoreflect.Value + mapv map[any]protoreflect.Value } func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } @@ -498,7 +499,7 @@ func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { return v.List().Len() > 0 case fd.ContainingOneof() != nil: return true - case fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension(): + case !fd.HasPresence() && !fd.IsExtension(): switch fd.Kind() { case protoreflect.BoolKind: return v.Bool() @@ -633,11 +634,11 @@ func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value { // // The InterfaceOf and ValueOf methods of the extension type are defined as: // -// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +// func (xt extensionType) ValueOf(iv any) protoreflect.Value { // return protoreflect.ValueOf(iv) // } // -// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +// func (xt extensionType) InterfaceOf(v protoreflect.Value) any { // return v.Interface() // } // @@ -657,7 +658,7 @@ func (xt extensionType) New() protoreflect.Value { case xt.desc.IsMap(): return protoreflect.ValueOfMap(&dynamicMap{ desc: xt.desc, - mapv: make(map[interface{}]protoreflect.Value), + mapv: make(map[any]protoreflect.Value), }) case xt.desc.IsList(): return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) @@ -685,18 +686,18 @@ func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { return xt.desc } -func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +func (xt extensionType) ValueOf(iv any) protoreflect.Value { v := protoreflect.ValueOf(iv) 
typecheck(xt.desc, v) return v } -func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +func (xt extensionType) InterfaceOf(v protoreflect.Value) any { typecheck(xt.desc, v) return v.Interface() } -func (xt extensionType) IsValidInterface(iv interface{}) bool { +func (xt extensionType) IsValidInterface(iv any) bool { return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil } diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go index 5a8010f1..c432817b 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -23,13 +23,20 @@ type extField struct { // A Types is a collection of dynamically constructed descriptors. // Its methods are safe for concurrent use. // -// Types implements protoregistry.MessageTypeResolver and protoregistry.ExtensionTypeResolver. -// A Types may be used as a proto.UnmarshalOptions.Resolver. +// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver]. +// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver]. type Types struct { + // atomicExtFiles is used with sync/atomic and hence must be the first word + // of the struct to guarantee 64-bit alignment. + // + // TODO(stapelberg): once we only support Go 1.19 and newer, switch this + // field to be of type atomic.Uint64 to guarantee alignment on + // stack-allocated values, too. + atomicExtFiles uint64 + extMu sync.Mutex + files *protoregistry.Files - extMu sync.Mutex - atomicExtFiles uint64 extensionsByMessage map[extField]protoreflect.ExtensionDescriptor } @@ -45,7 +52,7 @@ func NewTypes(f *protoregistry.Files) *Types { // FindEnumByName looks up an enum by its full name; // e.g., "google.protobuf.Field.Kind". // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -63,7 +70,7 @@ func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumTyp // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -79,7 +86,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex // FindExtensionByNumber looks up an extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { // Construct the extension number map lazily, since not every user will need it. // Update the map if new files are added to the registry. @@ -96,7 +103,7 @@ func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field proto // FindMessageByName looks up a message by its full name; // e.g. "google.protobuf.Any". // -// This returns (nil, protoregistry.NotFound) if not found. 
+// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) { d, err := t.files.FindDescriptorByName(name) if err != nil { @@ -112,7 +119,7 @@ func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.Mess // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, protoregistry.NotFound) if not found. +// This returns (nil, [protoregistry.NotFound]) if not found. func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go new file mode 100644 index 00000000..a2ca940c --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -0,0 +1,181 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2023 Google Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/go_features.proto + +package gofeaturespb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +type GoFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Whether or not to generate the deprecated UnmarshalJSON method for enums. + LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` +} + +func (x *GoFeatures) Reset() { + *x = GoFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoFeatures) ProtoMessage() {} + +func (x *GoFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. +func (*GoFeatures) Descriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} +} + +func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { + if x != nil && x.LegacyUnmarshalJsonEnum != nil { + return *x.LegacyUnmarshalJsonEnum + } + return false +} + +var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FeatureSet)(nil), + ExtensionType: (*GoFeatures)(nil), + Field: 1002, + Name: "pb.go", + Tag: "bytes,1002,opt,name=go", + Filename: "google/protobuf/go_features.proto", + }, +} + +// Extension fields to descriptorpb.FeatureSet. 
+var ( + // optional pb.GoFeatures go = 1002; + E_Go = &file_google_protobuf_go_features_proto_extTypes[0] +) + +var File_google_protobuf_go_features_proto protoreflect.FileDescriptor + +var file_google_protobuf_go_features_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, +} + +var ( + file_google_protobuf_go_features_proto_rawDescOnce sync.Once + file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc +) + +func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { + file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + }) + return file_google_protobuf_go_features_proto_rawDescData +} + +var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_go_features_proto_goTypes = []any{ + (*GoFeatures)(nil), // 0: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet +} +var file_google_protobuf_go_features_proto_depIdxs = []int32{ + 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet + 0, 
// 1: pb.go:type_name -> pb.GoFeatures + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_go_features_proto_init() } +func file_google_protobuf_go_features_proto_init() { + if File_google_protobuf_go_features_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GoFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_protobuf_go_features_proto_goTypes, + DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + MessageInfos: file_google_protobuf_go_features_proto_msgTypes, + ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, + }.Build() + File_google_protobuf_go_features_proto = out.File + file_google_protobuf_go_features_proto_rawDesc = nil + file_google_protobuf_go_features_proto_goTypes = nil + file_google_protobuf_go_features_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f..7172b43d 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
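The updated doc comment on dynamicpb.Types above states that it implements protoregistry.MessageTypeResolver and protoregistry.ExtensionTypeResolver and may be used as a proto.UnmarshalOptions.Resolver. A minimal sketch of that usage, assuming protoregistry.GlobalFiles already holds the relevant descriptors; the package name and the unmarshalWithDynamicTypes helper are illustrative, not part of this diff:

package example

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/dynamicpb"
)

// unmarshalWithDynamicTypes resolves msgName against the global file registry
// and unmarshals data into a dynamically constructed message, reusing the same
// Types value as the extension resolver for the decode.
func unmarshalWithDynamicTypes(data []byte, msgName protoreflect.FullName) (proto.Message, error) {
	types := dynamicpb.NewTypes(protoregistry.GlobalFiles) // assumption: global registry is sufficient here

	mt, err := types.FindMessageByName(msgName) // returns (nil, protoregistry.NotFound) if unknown
	if err != nil {
		return nil, err
	}

	m := mt.New().Interface()
	opts := proto.UnmarshalOptions{Resolver: types}
	if err := opts.Unmarshal(data, m); err != nil {
		return nil, err
	}
	return m, nil
}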
@@ -444,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -461,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8d..1b71bcd9 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 9a7277ba..d87b4fb8 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ +var file_google_protobuf_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e8789cb3..ac1e91bb 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ +var file_google_protobuf_field_mask_proto_goTypes = []any{ (*FieldMask)(nil), // 0: 
google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d2bac8b8..d45361cb 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -49,11 +49,11 @@ // The standard Go "encoding/json" package has functionality to serialize // arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and // ListValue.AsSlice methods can convert the protobuf message representation into -// a form represented by interface{}, map[string]interface{}, and []interface{}. +// a form represented by any, map[string]any, and []any. // This form can be used with other packages that operate on such data structures // and also directly with the standard json package. // -// In order to convert the interface{}, map[string]interface{}, and []interface{} +// In order to convert the any, map[string]any, and []any // forms back as Value, Struct, and ListValue messages, use the NewStruct, // NewList, and NewValue constructor functions. // @@ -88,28 +88,28 @@ // // To construct a Value message representing the above JSON object: // -// m, err := structpb.NewValue(map[string]interface{}{ +// m, err := structpb.NewValue(map[string]any{ // "firstName": "John", // "lastName": "Smith", // "isAlive": true, // "age": 27, -// "address": map[string]interface{}{ +// "address": map[string]any{ // "streetAddress": "21 2nd Street", // "city": "New York", // "state": "NY", // "postalCode": "10021-3100", // }, -// "phoneNumbers": []interface{}{ -// map[string]interface{}{ +// "phoneNumbers": []any{ +// map[string]any{ // "type": "home", // "number": "212 555-1234", // }, -// map[string]interface{}{ +// map[string]any{ // "type": "office", // "number": "646 555-4567", // }, // }, -// "children": []interface{}{}, +// "children": []any{}, // "spouse": nil, // }) // if err != nil { @@ -197,7 +197,7 @@ type Struct struct { // NewStruct constructs a Struct from a general-purpose Go map. // The map keys must be valid UTF-8. // The map values are converted using NewValue. -func NewStruct(v map[string]interface{}) (*Struct, error) { +func NewStruct(v map[string]any) (*Struct, error) { x := &Struct{Fields: make(map[string]*Value, len(v))} for k, v := range v { if !utf8.ValidString(k) { @@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) { // AsMap converts x to a general-purpose Go map. // The map values are converted by calling Value.AsInterface. 
-func (x *Struct) AsMap() map[string]interface{} { +func (x *Struct) AsMap() map[string]any { f := x.GetFields() - vs := make(map[string]interface{}, len(f)) + vs := make(map[string]any, len(f)) for k, v := range f { vs[k] = v.AsInterface() } @@ -306,13 +306,13 @@ type Value struct { // ║ float32, float64 │ stored as NumberValue ║ // ║ string │ stored as StringValue; must be valid UTF-8 ║ // ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]interface{} │ stored as StructValue ║ -// ║ []interface{} │ stored as ListValue ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ // ╚════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. -func NewValue(v interface{}) (*Value, error) { +func NewValue(v any) (*Value, error) { switch v := v.(type) { case nil: return NewNullValue(), nil @@ -342,13 +342,13 @@ func NewValue(v interface{}) (*Value, error) { case []byte: s := base64.StdEncoding.EncodeToString(v) return NewStringValue(s), nil - case map[string]interface{}: + case map[string]any: v2, err := NewStruct(v) if err != nil { return nil, err } return NewStructValue(v2), nil - case []interface{}: + case []any: v2, err := NewList(v) if err != nil { return nil, err @@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value { // // Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are // converted as strings to remain compatible with MarshalJSON. -func (x *Value) AsInterface() interface{} { +func (x *Value) AsInterface() any { switch v := x.GetKind().(type) { case *Value_NumberValue: if v != nil { @@ -580,7 +580,7 @@ type ListValue struct { // NewList constructs a ListValue from a general-purpose Go slice. // The slice elements are converted using NewValue. -func NewList(v []interface{}) (*ListValue, error) { +func NewList(v []any) (*ListValue, error) { x := &ListValue{Values: make([]*Value, len(v))} for i, v := range v { var err error @@ -594,9 +594,9 @@ func NewList(v []interface{}) (*ListValue, error) { // AsSlice converts x to a general-purpose Go slice. // The slice elements are converted by calling Value.AsInterface. 
-func (x *ListValue) AsSlice() []interface{} { +func (x *ListValue) AsSlice() []any { vals := x.GetValues() - vs := make([]interface{}, len(vals)) + vs := make([]any, len(vals)) for i, v := range vals { vs[i] = v.AsInterface() } @@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte { var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_google_protobuf_struct_proto_goTypes = []interface{}{ +var file_google_protobuf_struct_proto_goTypes = []any{ (NullValue)(0), // 0: google.protobuf.NullValue (*Struct)(nil), // 1: google.protobuf.Struct (*Value)(nil), // 2: google.protobuf.Value @@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Struct); i { case 0: return &v.state @@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Value); i { case 0: return &v.state @@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() { return nil } } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ListValue); i { case 0: return &v.state @@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() { } } } - file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), (*Value_StringValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a33..83a5a645 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 762a8713..e473f826 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -605,7 +605,7 @@ func 
file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { } var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_google_protobuf_wrappers_proto_goTypes = []interface{}{ +var file_google_protobuf_wrappers_proto_goTypes = []any{ (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue (*FloatValue)(nil), // 1: google.protobuf.FloatValue (*Int64Value)(nil), // 2: google.protobuf.Int64Value @@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*DoubleValue); i { case 0: return &v.state @@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FloatValue); i { case 0: return &v.state @@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Int64Value); i { case 0: return &v.state @@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*UInt64Value); i { case 0: return &v.state @@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Int32Value); i { case 0: return &v.state @@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*UInt32Value); i { case 0: return &v.state @@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*BoolValue); i { case 0: return &v.state @@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*StringValue); i { case 0: return &v.state @@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() { return nil } } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*BytesValue); i { case 0: return &v.state diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index 
155648ac..1a9f5e77 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - deads2k diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 60c8209d..cbdf2eeb 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -22,14 +22,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true +// if the conditions are changed by this call. // conditions must be non-nil. // 1. if the condition of the specified type already exists (all fields of the existing condition are updated to // newCondition, LastTransitionTime is set to now if the new status differs from the old status) // 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { +func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) { if conditions == nil { - return + return false } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -37,7 +38,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond newCondition.LastTransitionTime = metav1.NewTime(time.Now()) } *conditions = append(*conditions, newCondition) - return + return true } if existingCondition.Status != newCondition.Status { @@ -47,18 +48,31 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond } else { existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } + changed = true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - existingCondition.ObservedGeneration = newCondition.ObservedGeneration + if existingCondition.Reason != newCondition.Reason { + existingCondition.Reason = newCondition.Reason + changed = true + } + if existingCondition.Message != newCondition.Message { + existingCondition.Message = newCondition.Message + changed = true + } + if existingCondition.ObservedGeneration != newCondition.ObservedGeneration { + existingCondition.ObservedGeneration = newCondition.ObservedGeneration + changed = true + } + + return changed } -// RemoveStatusCondition removes the corresponding conditionType from conditions. +// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns +// true if it was present and got removed. // conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { +func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) { if conditions == nil || len(*conditions) == 0 { - return + return false } newConditions := make([]metav1.Condition, 0, len(*conditions)-1) for _, condition := range *conditions { @@ -67,7 +81,10 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) } } + removed = len(*conditions) != len(newConditions) *conditions = newConditions + + return removed } // FindStatusCondition finds the conditionType in conditions. 
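The conditions.go hunk above changes meta.SetStatusCondition and meta.RemoveStatusCondition to report whether the condition slice was actually modified, so callers can avoid redundant status writes. A minimal sketch of a caller under assumed names; the "Ready" type, the "ReconcileSucceeded" reason, and the markReady helper are illustrative, not taken from this diff:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// markReady sets or updates a "Ready" condition and reports whether the slice
// changed, mirroring the new SetStatusCondition return value.
func markReady(conditions *[]metav1.Condition, observedGeneration int64) bool {
	return meta.SetStatusCondition(conditions, metav1.Condition{
		Type:               "Ready",              // assumed condition type
		Status:             metav1.ConditionTrue,
		Reason:             "ReconcileSucceeded", // assumed reason
		Message:            "all dependencies are available",
		ObservedGeneration: observedGeneration,
	})
}

With the boolean result, a reconciler would typically issue a status update only when markReady (or the corresponding RemoveStatusCondition call) returns true.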
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 899d3e8a..1fdd32c4 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -40,8 +40,7 @@ var ( // IsListType returns true if the provided Object has a slice called Items. // TODO: Replace the code in this check with an interface comparison by -// -// creating and enforcing that lists implement a list accessor. +// creating and enforcing that lists implement a list accessor. func IsListType(obj runtime.Object) bool { switch t := obj.(type) { case runtime.Unstructured: @@ -113,8 +112,27 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { // EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates // the loop. +// +// If items passed to fn are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use EachListItemWithAlloc instead. func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, false) +} + +// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. +func EachListItemWithAlloc(obj runtime.Object, fn func(runtime.Object) error) error { + return eachListItem(obj, fn, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func eachListItem(obj runtime.Object, fn func(runtime.Object) error, allocNew bool) error { if unstructured, ok := obj.(runtime.Unstructured); ok { + if allocNew { + return unstructured.EachListItemWithAlloc(fn) + } return unstructured.EachListItem(fn) } // TODO: Change to an interface call? @@ -141,8 +159,19 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { for i := 0; i < len; i++ { raw := items.Index(i) if takeAddr { - raw = raw.Addr() + if allocNew { + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + // reflect.New will guarantee that itemCopy must be a pointer. + raw = itemCopy + } else { + raw = raw.Addr() + } } + // raw must be a pointer or an interface + // allocate a pointer is cheap switch item := raw.Interface().(type) { case *runtime.RawExtension: if err := fn(item.Object); err != nil { @@ -167,7 +196,23 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { // ExtractList returns obj's Items element as an array of runtime.Objects. // Returns an error if obj is not a List type (does not have an Items member). +// +// If items in the returned list are retained for different durations, and you want to avoid +// retaining all items in obj as long as any item is referenced, use ExtractListWithAlloc instead. func ExtractList(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, false) +} + +// ExtractListWithAlloc works like ExtractList, but avoids retaining references to the items slice in obj. +// It does this by making a shallow copy of non-pointer items in obj. +// +// If the items in the returned list are not retained, or are retained for the same duration, use ExtractList instead for memory efficiency. 
+func ExtractListWithAlloc(obj runtime.Object) ([]runtime.Object, error) { + return extractList(obj, true) +} + +// allocNew: Whether shallow copy is required when the elements in Object.Items are struct +func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) { itemsPtr, err := GetItemsPtr(obj) if err != nil { return nil, err @@ -177,10 +222,17 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return nil, err } list := make([]runtime.Object, items.Len()) + if len(list) == 0 { + return list, nil + } + elemType := items.Type().Elem() + isRawExtension := elemType == rawExtensionObjectType + implementsObject := elemType.Implements(objectType) for i := range list { raw := items.Index(i) - switch item := raw.Interface().(type) { - case runtime.RawExtension: + switch { + case isRawExtension: + item := raw.Interface().(runtime.RawExtension) switch { case item.Object != nil: list[i] = item.Object @@ -190,8 +242,18 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { default: list[i] = nil } - case runtime.Object: - list[i] = item + case implementsObject: + list[i] = raw.Interface().(runtime.Object) + case allocNew: + // shallow copy to avoid retaining a reference to the original list item + itemCopy := reflect.New(raw.Type()) + // assign to itemCopy and type-assert + itemCopy.Elem().Set(raw) + var ok bool + // reflect.New will guarantee that itemCopy must be a pointer. + if list[i], ok = itemCopy.Interface().(runtime.Object); !ok { + return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } default: var found bool if list[i], found = raw.Addr().Interface().(runtime.Object); !found { @@ -202,8 +264,12 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { return list, nil } -// objectSliceType is the type of a slice of Objects -var objectSliceType = reflect.TypeOf([]runtime.Object{}) +var ( + // objectSliceType is the type of a slice of Objects + objectSliceType = reflect.TypeOf([]runtime.Object{}) + objectType = reflect.TypeOf((*runtime.Object)(nil)).Elem() + rawExtensionObjectType = reflect.TypeOf(runtime.RawExtension{}) +) // LenList returns the length of this list or 0 if it is not a list. func LenList(list runtime.Object) int { @@ -238,7 +304,7 @@ func SetList(list runtime.Object, objects []runtime.Object) error { slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) for i := range objects { dest := slice.Index(i) - if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { + if dest.Type() == rawExtensionObjectType { dest = dest.FieldByName("Object") } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index d1c9f530..063fd285 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -2,7 +2,6 @@ reviewers: - thockin - - lavalamp - smarterclayton - wojtek-t - derekwaynecarr diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go index a8866a43..2eebec66 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -203,6 +203,44 @@ func (a *int64Amount) Sub(b int64Amount) bool { return a.Add(int64Amount{value: -b.value, scale: b.scale}) } +// Mul multiplies the provided b to the current amount, or +// returns false if overflow or underflow would result. 
+func (a *int64Amount) Mul(b int64) bool { + switch { + case a.value == 0: + return true + case b == 0: + a.value = 0 + a.scale = 0 + return true + case a.scale == 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + a.value = c + case a.scale > 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = positiveScaleInt64(c, a.scale); !ok { + return false + } + a.value = c + default: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = negativeScaleInt64(c, -a.scale); !ok { + return false + } + a.value = c + } + return true +} + // AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision // was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 53a25d34..c3a27216 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// source: k8s.io/apimachinery/pkg/api/resource/generated.proto package resource @@ -41,7 +41,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *Quantity) Reset() { *m = Quantity{} } func (*Quantity) ProtoMessage() {} func (*Quantity) Descriptor() ([]byte, []int) { - return fileDescriptor_612bba87bd70906c, []int{0} + return fileDescriptor_7288c78ff45111e9, []int{0} } func (m *Quantity) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Quantity.Unmarshal(m, b) @@ -64,7 +64,7 @@ var xxx_messageInfo_Quantity proto.InternalMessageInfo func (m *QuantityValue) Reset() { *m = QuantityValue{} } func (*QuantityValue) ProtoMessage() {} func (*QuantityValue) Descriptor() ([]byte, []int) { - return fileDescriptor_612bba87bd70906c, []int{1} + return fileDescriptor_7288c78ff45111e9, []int{1} } func (m *QuantityValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_QuantityValue.Unmarshal(m, b) @@ -90,25 +90,24 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) + proto.RegisterFile("k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_7288c78ff45111e9) } -var fileDescriptor_612bba87bd70906c = []byte{ - // 254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0xcd, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0x4b, - 0xcd, 0x4b, 0xc9, 0x2f, 0xd2, 0x87, 0x4a, 0x24, 0x16, 0x64, 0xe6, 0x26, 0x26, 0x67, 0x64, 0xe6, - 0xa5, 0x16, 0x55, 0xea, 0x17, 0x64, 0xa7, 0x83, 0x04, 0xf4, 0x8b, 0x52, 0x8b, 0xf3, 0x4b, 0x8b, - 0x92, 0x53, 0xf5, 0xd3, 0x53, 0xf3, 0x52, 0x8b, 0x12, 0x4b, 0x52, 0x53, 0xf4, 0x0a, 0x8a, 0xf2, - 0x4b, 0xf2, 0x85, 0x54, 0x20, 0xba, 0xf4, 0x90, 0x75, 0xe9, 0x15, 0x64, 0xa7, 0x83, 0x04, 0xf4, - 0x60, 0xba, 0xa4, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, - 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x18, - 0xaa, 0x64, 0xc1, 0xc5, 0x11, 0x58, 0x9a, 0x98, 0x57, 0x92, 0x59, 0x52, 
0x29, 0x24, 0xc6, 0xc5, - 0x56, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe5, 0x59, - 0x89, 0xcc, 0x58, 0x20, 0xcf, 0xd0, 0xb1, 0x50, 0x9e, 0x61, 0xc2, 0x42, 0x79, 0x86, 0x05, 0x0b, - 0xe5, 0x19, 0x1a, 0xee, 0x28, 0x30, 0x28, 0xd9, 0x72, 0xf1, 0xc2, 0x74, 0x86, 0x25, 0xe6, 0x94, - 0xa6, 0x92, 0xa6, 0xdd, 0xc9, 0xeb, 0xc4, 0x43, 0x39, 0x86, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, - 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x37, - 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0x43, 0x94, 0x0a, 0x31, 0x21, - 0x05, 0x08, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x70, 0x98, 0xa3, 0x69, 0x01, 0x00, 0x00, +var fileDescriptor_7288c78ff45111e9 = []byte{ + // 234 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xc9, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0x17, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0xea, 0xa7, + 0xa7, 0xe6, 0xa5, 0x16, 0x25, 0x96, 0xa4, 0xa6, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xa9, + 0x40, 0x74, 0xe9, 0x21, 0xeb, 0xd2, 0x2b, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0xc1, 0x74, 0x49, 0xe9, + 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, + 0x83, 0x35, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x54, 0xc9, 0x82, 0x8b, + 0x23, 0xb0, 0x34, 0x31, 0xaf, 0x24, 0xb3, 0xa4, 0x52, 0x48, 0x8c, 0x8b, 0xad, 0xb8, 0xa4, 0x28, + 0x33, 0x2f, 0x5d, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xca, 0xb3, 0x12, 0x99, 0xb1, 0x40, + 0x9e, 0xa1, 0x63, 0xa1, 0x3c, 0xc3, 0x84, 0x85, 0xf2, 0x0c, 0x0b, 0x16, 0xca, 0x33, 0x34, 0xdc, + 0x51, 0x60, 0x50, 0xb2, 0xe5, 0xe2, 0x85, 0xe9, 0x0c, 0x4b, 0xcc, 0x29, 0x4d, 0x25, 0x4d, 0xbb, + 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, + 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x6f, 0x3c, 0x92, 0x63, 0x7c, + 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x28, 0x15, 0x62, 0x42, 0x0a, 0x10, 0x00, 0x00, + 0xff, 0xff, 0x50, 0x91, 0xd0, 0x9c, 0x50, 0x01, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index b47d554b..50af8334 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + inf "gopkg.in/inf.v0" ) @@ -592,6 +594,16 @@ func (q *Quantity) Sub(y Quantity) { q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) } +// Mul multiplies the provided y to the current value. +// It will return false if the result is inexact. Otherwise, it will return true. +func (q *Quantity) Mul(y int64) bool { + q.s = "" + if q.d.Dec == nil && q.i.Mul(y) { + return true + } + return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64() +} + // Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the // quantity is greater than y. func (q *Quantity) Cmp(y Quantity) int { @@ -673,6 +685,12 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +func (q Quantity) MarshalCBOR() ([]byte, error) { + // The call to String() should never return the string "" because the receiver's + // address will never be nil. 
+ return cbor.Marshal(q.String()) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (q Quantity) ToUnstructured() interface{} { return q.String() @@ -701,6 +719,27 @@ func (q *Quantity) UnmarshalJSON(value []byte) error { return nil } +func (q *Quantity) UnmarshalCBOR(value []byte) error { + var s *string + if err := cbor.Unmarshal(value, &s); err != nil { + return err + } + + if s == nil { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + + parsed, err := ParseQuantity(strings.TrimSpace(*s)) + if err != nil { + return err + } + + *q = parsed + return nil +} + // NewDecimalQuantity returns a new Quantity representing the given // value in the given format. func NewDecimalQuantity(b inf.Dec, format Format) *Quantity { diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS new file mode 100644 index 00000000..40237324 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go new file mode 100644 index 00000000..29c6a48b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// SetListOptionsDefaults sets defaults on the provided ListOptions if applicable. +// +// TODO(#115478): once the watch-list fg is always on we register this function in the scheme (via AddTypeDefaultingFunc). +// TODO(#115478): when the function is registered in the scheme remove all callers of this method. +func SetListOptionsDefaults(obj *ListOptions, isWatchListFeatureEnabled bool) { + if !isWatchListFeatureEnabled { + return + } + if obj.SendInitialEvents != nil || len(obj.ResourceVersionMatch) != 0 { + return + } + legacy := obj.ResourceVersion == "" || obj.ResourceVersion == "0" + if obj.Watch && legacy { + turnOnInitialEvents := true + obj.SendInitialEvents = &turnOnInitialEvents + obj.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index a49b5f2b..00d2b8c6 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -66,6 +66,31 @@ type ListOptions struct { // it does not recognize and will return a 410 error if the token can no longer be used because // it has expired. 
Continue string + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. + // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + SendInitialEvents *bool } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go index 8403d1a8..2734a8f3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go @@ -17,18 +17,20 @@ limitations under the License. package validation import ( + "fmt" + "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" ) // ValidateListOptions returns all validation errors found while validating the ListOptions. 
-func ValidateListOptions(options *internalversion.ListOptions) field.ErrorList { +func ValidateListOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + if options.Watch { + return validateWatchOptions(options, isWatchListFeatureEnabled) + } allErrs := field.ErrorList{} if match := options.ResourceVersionMatch; len(match) > 0 { - if options.Watch { - allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch")) - } if len(options.ResourceVersion) == 0 { allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided")) } @@ -42,5 +44,33 @@ func ValidateListOptions(options *internalversion.ListOptions) field.ErrorList { allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\"")) } } + if options.SendInitialEvents != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for list")) + } + return allErrs +} + +func validateWatchOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList { + allErrs := field.ErrorList{} + match := options.ResourceVersionMatch + if options.SendInitialEvents != nil { + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), fmt.Sprintf("sendInitialEvents requires setting resourceVersionMatch to %s", metav1.ResourceVersionMatchNotOlderThan))) + } + if !isWatchListFeatureEnabled { + allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for watch unless the WatchList feature gate is enabled")) + } + } + if len(match) > 0 { + if options.SendInitialEvents == nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch unless sendInitialEvents is provided")) + } + if match != metav1.ResourceVersionMatchNotOlderThan { + allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchNotOlderThan)})) + } + if len(options.Continue) > 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided")) + } + } return allErrs } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go index 6d212b84..a6552c27 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -115,6 +115,7 @@ func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } @@ -137,6 +138,7 @@ func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOption out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) out.Limit = in.Limit out.Continue = in.Continue + out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents)) return nil } diff --git 
a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index 6e1eac5c..af66a2ac 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -75,6 +75,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 15b45ffa..5005beb1 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -18,6 +18,7 @@ package v1 import ( "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" ) // IsControlledBy checks if the object has a controllerRef set to the given owner @@ -36,10 +37,14 @@ func GetControllerOf(controllee Object) *OwnerReference { return nil } cp := *ref + cp.Controller = ptr.To(*ref.Controller) + if ref.BlockOwnerDeletion != nil { + cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion) + } return &cp } -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller func GetControllerOfNoCopy(controllee Object) *OwnerReference { refs := controllee.GetOwnerReferences() for i := range refs { @@ -52,14 +57,12 @@ func GetControllerOfNoCopy(controllee Object) *OwnerReference { // NewControllerRef creates an OwnerReference pointing to the given owner. func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { - blockOwnerDeletion := true - isController := true return &OwnerReference{ APIVersion: gvk.GroupVersion().String(), Kind: gvk.Kind, Name: owner.GetName(), UID: owner.GetUID(), - BlockOwnerDeletion: &blockOwnerDeletion, - Controller: &isController, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), } } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 7e00eb7d..229ea2c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// source: k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto package v1 @@ -52,7 +52,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *APIGroup) Reset() { *m = APIGroup{} } func (*APIGroup) ProtoMessage() {} func (*APIGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{0} + return fileDescriptor_a8431b6e0aeeb761, []int{0} } func (m *APIGroup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,7 +80,7 @@ var xxx_messageInfo_APIGroup proto.InternalMessageInfo func (m *APIGroupList) Reset() { *m = APIGroupList{} } func (*APIGroupList) ProtoMessage() {} func (*APIGroupList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{1} + return fileDescriptor_a8431b6e0aeeb761, []int{1} } func (m *APIGroupList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -108,7 +108,7 @@ var xxx_messageInfo_APIGroupList proto.InternalMessageInfo func (m *APIResource) Reset() { *m = APIResource{} } func (*APIResource) ProtoMessage() {} func (*APIResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{2} + return fileDescriptor_a8431b6e0aeeb761, []int{2} } func (m *APIResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -136,7 +136,7 @@ var xxx_messageInfo_APIResource proto.InternalMessageInfo func (m *APIResourceList) Reset() { *m = APIResourceList{} } func (*APIResourceList) ProtoMessage() {} func (*APIResourceList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{3} + return fileDescriptor_a8431b6e0aeeb761, []int{3} } func (m *APIResourceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -164,7 +164,7 @@ var xxx_messageInfo_APIResourceList proto.InternalMessageInfo func (m *APIVersions) Reset() { *m = APIVersions{} } func (*APIVersions) ProtoMessage() {} func (*APIVersions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{4} + return fileDescriptor_a8431b6e0aeeb761, []int{4} } func (m *APIVersions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -192,7 +192,7 @@ var xxx_messageInfo_APIVersions proto.InternalMessageInfo func (m *ApplyOptions) Reset() { *m = ApplyOptions{} } func (*ApplyOptions) ProtoMessage() {} func (*ApplyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{5} + return fileDescriptor_a8431b6e0aeeb761, []int{5} } func (m *ApplyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -220,7 +220,7 @@ var xxx_messageInfo_ApplyOptions proto.InternalMessageInfo func (m *Condition) Reset() { *m = Condition{} } func (*Condition) ProtoMessage() {} func (*Condition) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{6} + return fileDescriptor_a8431b6e0aeeb761, []int{6} } func (m *Condition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -248,7 +248,7 @@ var xxx_messageInfo_Condition proto.InternalMessageInfo func (m *CreateOptions) Reset() { *m = CreateOptions{} } func (*CreateOptions) ProtoMessage() {} func (*CreateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{7} + return fileDescriptor_a8431b6e0aeeb761, []int{7} } func (m *CreateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -276,7 +276,7 @@ var xxx_messageInfo_CreateOptions proto.InternalMessageInfo func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func 
(*DeleteOptions) ProtoMessage() {} func (*DeleteOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{8} + return fileDescriptor_a8431b6e0aeeb761, []int{8} } func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -304,7 +304,7 @@ var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo func (m *Duration) Reset() { *m = Duration{} } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{9} + return fileDescriptor_a8431b6e0aeeb761, []int{9} } func (m *Duration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,10 +329,38 @@ func (m *Duration) XXX_DiscardUnknown() { var xxx_messageInfo_Duration proto.InternalMessageInfo +func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} } +func (*FieldSelectorRequirement) ProtoMessage() {} +func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_a8431b6e0aeeb761, []int{10} +} +func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldSelectorRequirement.Merge(m, src) +} +func (m *FieldSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *FieldSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo + func (m *FieldsV1) Reset() { *m = FieldsV1{} } func (*FieldsV1) ProtoMessage() {} func (*FieldsV1) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{10} + return fileDescriptor_a8431b6e0aeeb761, []int{11} } func (m *FieldsV1) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -360,7 +388,7 @@ var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo func (m *GetOptions) Reset() { *m = GetOptions{} } func (*GetOptions) ProtoMessage() {} func (*GetOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{11} + return fileDescriptor_a8431b6e0aeeb761, []int{12} } func (m *GetOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -388,7 +416,7 @@ var xxx_messageInfo_GetOptions proto.InternalMessageInfo func (m *GroupKind) Reset() { *m = GroupKind{} } func (*GroupKind) ProtoMessage() {} func (*GroupKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{12} + return fileDescriptor_a8431b6e0aeeb761, []int{13} } func (m *GroupKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -416,7 +444,7 @@ var xxx_messageInfo_GroupKind proto.InternalMessageInfo func (m *GroupResource) Reset() { *m = GroupResource{} } func (*GroupResource) ProtoMessage() {} func (*GroupResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{13} + return fileDescriptor_a8431b6e0aeeb761, []int{14} } func (m *GroupResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -444,7 +472,7 @@ var xxx_messageInfo_GroupResource proto.InternalMessageInfo func (m *GroupVersion) Reset() { *m = GroupVersion{} } func (*GroupVersion) ProtoMessage() {} func (*GroupVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{14} + return 
fileDescriptor_a8431b6e0aeeb761, []int{15} } func (m *GroupVersion) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -472,7 +500,7 @@ var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{15} + return fileDescriptor_a8431b6e0aeeb761, []int{16} } func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -500,7 +528,7 @@ var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } func (*GroupVersionKind) ProtoMessage() {} func (*GroupVersionKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{16} + return fileDescriptor_a8431b6e0aeeb761, []int{17} } func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +556,7 @@ var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } func (*GroupVersionResource) ProtoMessage() {} func (*GroupVersionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{17} + return fileDescriptor_a8431b6e0aeeb761, []int{18} } func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -556,7 +584,7 @@ var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo func (m *LabelSelector) Reset() { *m = LabelSelector{} } func (*LabelSelector) ProtoMessage() {} func (*LabelSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{18} + return fileDescriptor_a8431b6e0aeeb761, []int{19} } func (m *LabelSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +612,7 @@ var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{19} + return fileDescriptor_a8431b6e0aeeb761, []int{20} } func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +640,7 @@ var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{20} + return fileDescriptor_a8431b6e0aeeb761, []int{21} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +668,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *ListMeta) Reset() { *m = ListMeta{} } func (*ListMeta) ProtoMessage() {} func (*ListMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{21} + return fileDescriptor_a8431b6e0aeeb761, []int{22} } func (m *ListMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +696,7 @@ var xxx_messageInfo_ListMeta proto.InternalMessageInfo func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} func (*ListOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{22} + return fileDescriptor_a8431b6e0aeeb761, []int{23} } func (m *ListOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +724,7 @@ var 
xxx_messageInfo_ListOptions proto.InternalMessageInfo func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } func (*ManagedFieldsEntry) ProtoMessage() {} func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{23} + return fileDescriptor_a8431b6e0aeeb761, []int{24} } func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +752,7 @@ var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo func (m *MicroTime) Reset() { *m = MicroTime{} } func (*MicroTime) ProtoMessage() {} func (*MicroTime) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{24} + return fileDescriptor_a8431b6e0aeeb761, []int{25} } func (m *MicroTime) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MicroTime.Unmarshal(m, b) @@ -747,7 +775,7 @@ var xxx_messageInfo_MicroTime proto.InternalMessageInfo func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{25} + return fileDescriptor_a8431b6e0aeeb761, []int{26} } func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -775,7 +803,7 @@ var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{26} + return fileDescriptor_a8431b6e0aeeb761, []int{27} } func (m *OwnerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -803,7 +831,7 @@ var xxx_messageInfo_OwnerReference proto.InternalMessageInfo func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } func (*PartialObjectMetadata) ProtoMessage() {} func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{27} + return fileDescriptor_a8431b6e0aeeb761, []int{28} } func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -831,7 +859,7 @@ var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{28} + return fileDescriptor_a8431b6e0aeeb761, []int{29} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -859,7 +887,7 @@ var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo func (m *Patch) Reset() { *m = Patch{} } func (*Patch) ProtoMessage() {} func (*Patch) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{29} + return fileDescriptor_a8431b6e0aeeb761, []int{30} } func (m *Patch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -887,7 +915,7 @@ var xxx_messageInfo_Patch proto.InternalMessageInfo func (m *PatchOptions) Reset() { *m = PatchOptions{} } func (*PatchOptions) ProtoMessage() {} func (*PatchOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{30} + return fileDescriptor_a8431b6e0aeeb761, []int{31} } func (m *PatchOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -915,7 +943,7 @@ var xxx_messageInfo_PatchOptions proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func 
(*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{31} + return fileDescriptor_a8431b6e0aeeb761, []int{32} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -943,7 +971,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} func (*RootPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{32} + return fileDescriptor_a8431b6e0aeeb761, []int{33} } func (m *RootPaths) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -971,7 +999,7 @@ var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{33} + return fileDescriptor_a8431b6e0aeeb761, []int{34} } func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -999,7 +1027,7 @@ var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{34} + return fileDescriptor_a8431b6e0aeeb761, []int{35} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1027,7 +1055,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} func (*StatusCause) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{35} + return fileDescriptor_a8431b6e0aeeb761, []int{36} } func (m *StatusCause) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1055,7 +1083,7 @@ var xxx_messageInfo_StatusCause proto.InternalMessageInfo func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} func (*StatusDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{36} + return fileDescriptor_a8431b6e0aeeb761, []int{37} } func (m *StatusDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1083,7 +1111,7 @@ var xxx_messageInfo_StatusDetails proto.InternalMessageInfo func (m *TableOptions) Reset() { *m = TableOptions{} } func (*TableOptions) ProtoMessage() {} func (*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{37} + return fileDescriptor_a8431b6e0aeeb761, []int{38} } func (m *TableOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1111,7 +1139,7 @@ var xxx_messageInfo_TableOptions proto.InternalMessageInfo func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{38} + return fileDescriptor_a8431b6e0aeeb761, []int{39} } func (m *Time) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Time.Unmarshal(m, b) @@ -1134,7 +1162,7 @@ var xxx_messageInfo_Time proto.InternalMessageInfo func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{39} + return fileDescriptor_a8431b6e0aeeb761, []int{40} } func (m *Timestamp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1190,7 @@ var xxx_messageInfo_Timestamp 
proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{40} + return fileDescriptor_a8431b6e0aeeb761, []int{41} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1190,7 +1218,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } func (*UpdateOptions) ProtoMessage() {} func (*UpdateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{41} + return fileDescriptor_a8431b6e0aeeb761, []int{42} } func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1246,7 @@ var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} func (*Verbs) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{42} + return fileDescriptor_a8431b6e0aeeb761, []int{43} } func (m *Verbs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1246,7 +1274,7 @@ var xxx_messageInfo_Verbs proto.InternalMessageInfo func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} func (*WatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{43} + return fileDescriptor_a8431b6e0aeeb761, []int{44} } func (m *WatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1282,6 +1310,7 @@ func init() { proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement") proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") @@ -1322,189 +1351,191 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) -} - -var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2842 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4b, 0x6f, 0x24, 0x47, - 0xd9, 0x3d, 0x0f, 0x7b, 0xe6, 0x9b, 0x19, 0x3f, 0x6a, 0xbd, 0x30, 0x6b, 0x84, 0xc7, 0xe9, 0x44, - 0xd1, 0x06, 0x92, 0x71, 0x76, 0x09, 0xd1, 0x66, 0x43, 0x02, 0x1e, 0xcf, 0x7a, 0xe3, 0x64, 0x1d, - 0x5b, 0xe5, 0xdd, 0x05, 0x42, 0x84, 0xd2, 0x9e, 0x2e, 0x8f, 0x1b, 0xf7, 0x74, 0x4f, 0xaa, 0x7a, - 0xbc, 0x19, 0x38, 0x90, 0x03, 0x08, 0x90, 0x50, 0x14, 0x6e, 0x9c, 0x50, 0x22, 0xf8, 0x01, 0x88, - 0x0b, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xc4, 0x1c, 0x38, 0x22, 0xae, 0xbe, - 0x80, 0xea, 0xd1, 0xdd, 0xd5, 0xf3, 0x58, 0xf7, 0x64, 0x97, 0x88, 0xdb, 0xf4, 0xf7, 0xae, 0xaa, - 0xaf, 0xbe, 0x47, 0x7d, 0x03, 0x3b, 0xc7, 0xd7, 0x58, 0xdd, 0xf1, 0xd7, 0x8f, 0x7b, 0x07, 0x84, - 0x7a, 0x24, 0x20, 0x6c, 0xfd, 0x84, 0x78, 0xb6, 0x4f, 0xd7, 0x15, 0xc2, 0xea, 0x3a, 0x1d, 0xab, - 0x75, 0xe4, 0x78, 0x84, 0xf6, 0xd7, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0xeb, 0x1d, 0x12, 0x58, 0xeb, - 0x27, 0x57, 0xd6, 0xdb, 0xc4, 0x23, 0xd4, 0x0a, 0x88, 0x5d, 0xef, 
0x52, 0x3f, 0xf0, 0xd1, 0x63, - 0x92, 0xab, 0xae, 0x73, 0xd5, 0xbb, 0xc7, 0x6d, 0x0e, 0x60, 0x75, 0xce, 0x55, 0x3f, 0xb9, 0xb2, - 0xf2, 0x54, 0xdb, 0x09, 0x8e, 0x7a, 0x07, 0xf5, 0x96, 0xdf, 0x59, 0x6f, 0xfb, 0x6d, 0x7f, 0x5d, - 0x30, 0x1f, 0xf4, 0x0e, 0xc5, 0x97, 0xf8, 0x10, 0xbf, 0xa4, 0xd0, 0x95, 0x89, 0xa6, 0xd0, 0x9e, - 0x17, 0x38, 0x1d, 0x32, 0x6c, 0xc5, 0xca, 0xb3, 0xe7, 0x31, 0xb0, 0xd6, 0x11, 0xe9, 0x58, 0xc3, - 0x7c, 0xe6, 0x9f, 0xb3, 0x50, 0xd8, 0xd8, 0xdb, 0xbe, 0x49, 0xfd, 0x5e, 0x17, 0xad, 0x41, 0xce, - 0xb3, 0x3a, 0xa4, 0x6a, 0xac, 0x19, 0x97, 0x8b, 0x8d, 0xf2, 0x07, 0x83, 0xda, 0xcc, 0xe9, 0xa0, - 0x96, 0x7b, 0xd5, 0xea, 0x10, 0x2c, 0x30, 0xc8, 0x85, 0xc2, 0x09, 0xa1, 0xcc, 0xf1, 0x3d, 0x56, - 0xcd, 0xac, 0x65, 0x2f, 0x97, 0xae, 0xbe, 0x58, 0x4f, 0xb3, 0xfe, 0xba, 0x50, 0x70, 0x57, 0xb2, - 0x6e, 0xf9, 0xb4, 0xe9, 0xb0, 0x96, 0x7f, 0x42, 0x68, 0xbf, 0xb1, 0xa8, 0xb4, 0x14, 0x14, 0x92, - 0xe1, 0x48, 0x03, 0xfa, 0x91, 0x01, 0x8b, 0x5d, 0x4a, 0x0e, 0x09, 0xa5, 0xc4, 0x56, 0xf8, 0x6a, - 0x76, 0xcd, 0x78, 0x08, 0x6a, 0xab, 0x4a, 0xed, 0xe2, 0xde, 0x90, 0x7c, 0x3c, 0xa2, 0x11, 0xfd, - 0xda, 0x80, 0x15, 0x46, 0xe8, 0x09, 0xa1, 0x1b, 0xb6, 0x4d, 0x09, 0x63, 0x8d, 0xfe, 0xa6, 0xeb, - 0x10, 0x2f, 0xd8, 0xdc, 0x6e, 0x62, 0x56, 0xcd, 0x89, 0x7d, 0xf8, 0x7a, 0x3a, 0x83, 0xf6, 0x27, - 0xc9, 0x69, 0x98, 0xca, 0xa2, 0x95, 0x89, 0x24, 0x0c, 0xdf, 0xc7, 0x0c, 0xf3, 0x10, 0xca, 0xe1, - 0x41, 0xde, 0x72, 0x58, 0x80, 0xee, 0xc2, 0x6c, 0x9b, 0x7f, 0xb0, 0xaa, 0x21, 0x0c, 0xac, 0xa7, - 0x33, 0x30, 0x94, 0xd1, 0x98, 0x57, 0xf6, 0xcc, 0x8a, 0x4f, 0x86, 0x95, 0x34, 0xf3, 0x67, 0x39, - 0x28, 0x6d, 0xec, 0x6d, 0x63, 0xc2, 0xfc, 0x1e, 0x6d, 0x91, 0x14, 0x4e, 0x73, 0x0d, 0xca, 0xcc, - 0xf1, 0xda, 0x3d, 0xd7, 0xa2, 0x1c, 0x5a, 0x9d, 0x15, 0x94, 0xcb, 0x8a, 0xb2, 0xbc, 0xaf, 0xe1, - 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xd7, 0x6a, 0x11, 0xbb, 0x9a, 0x59, 0x33, 0x2e, - 0x17, 0x1a, 0x48, 0xf1, 0xc1, 0xab, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x51, 0xc8, 0x0b, 0x4b, 0xab, - 0x05, 0xa1, 0xa6, 0xa2, 0xc8, 0xf3, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x13, 0x30, 0xa7, 0xbc, 0xac, - 0x5a, 0x14, 0x64, 0x0b, 0x8a, 0x6c, 0x2e, 0x74, 0x83, 0x10, 0xcf, 0xd7, 0x77, 0xec, 0x78, 0xb6, - 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x38, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, 0xfc, 0x09, 0xa1, 0x07, - 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0x8d, 0x22, 0x37, 0x4d, 0xfc, - 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x9f, 0x06, 0x62, 0x79, 0xd5, 0xfc, 0x5a, 0xf6, 0x72, - 0xb1, 0x31, 0xcf, 0xd7, 0xbb, 0x1f, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x5b, 0x56, 0x40, 0xda, 0x3e, - 0x75, 0x08, 0xab, 0xce, 0xc5, 0xf4, 0x9b, 0x11, 0x14, 0x6b, 0x14, 0xe8, 0x65, 0x40, 0x2c, 0xf0, - 0xa9, 0xd5, 0x26, 0x6a, 0xa9, 0x2f, 0x59, 0xec, 0xa8, 0x0a, 0x62, 0x75, 0x2b, 0x6a, 0x75, 0x68, - 0x7f, 0x84, 0x02, 0x8f, 0xe1, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, 0xae, 0x41, - 0xb9, 0xad, 0xdd, 0x3a, 0xe5, 0x17, 0xd1, 0x69, 0xeb, 0x37, 0x12, 0x27, 0x28, 0x11, 0x81, 0x22, - 0x55, 0x92, 0xc2, 0xe8, 0x72, 0x25, 0xb5, 0xd3, 0x86, 0x36, 0xc4, 0x9a, 0x34, 0x20, 0xc3, 0xb1, - 0x64, 0xf3, 0x9f, 0x86, 0x70, 0xe0, 0x30, 0xde, 0xa0, 0xcb, 0x5a, 0x4c, 0x33, 0xc4, 0xf6, 0x95, - 0x27, 0xc4, 0xa3, 0x73, 0x02, 0x41, 0xe6, 0xff, 0x22, 0x10, 0x5c, 0x2f, 0xfc, 0xf2, 0xbd, 0xda, - 0xcc, 0xdb, 0x7f, 0x5f, 0x9b, 0x31, 0x7f, 0x61, 0x40, 0x79, 0xa3, 0xdb, 0x75, 0xfb, 0xbb, 0xdd, - 0x40, 0x2c, 0xc0, 0x84, 0x59, 0x9b, 0xf6, 0x71, 0xcf, 0x53, 0x0b, 0x05, 0x7e, 0xbf, 0x9b, 0x02, - 0x82, 0x15, 0x86, 0xdf, 0x9f, 0x43, 0x9f, 0xb6, 0x88, 0xba, 0x6e, 0xd1, 0xfd, 0xd9, 0xe2, 
0x40, - 0x2c, 0x71, 0xfc, 0x90, 0x0f, 0x1d, 0xe2, 0xda, 0x3b, 0x96, 0x67, 0xb5, 0x09, 0x55, 0x97, 0x23, - 0xda, 0xfa, 0x2d, 0x0d, 0x87, 0x13, 0x94, 0xe6, 0x7f, 0x32, 0x50, 0xdc, 0xf4, 0x3d, 0xdb, 0x09, - 0xd4, 0xe5, 0x0a, 0xfa, 0xdd, 0x91, 0xe0, 0x71, 0xbb, 0xdf, 0x25, 0x58, 0x60, 0xd0, 0x73, 0x30, - 0xcb, 0x02, 0x2b, 0xe8, 0x31, 0x61, 0x4f, 0xb1, 0xf1, 0x48, 0x18, 0x96, 0xf6, 0x05, 0xf4, 0x6c, - 0x50, 0x5b, 0x88, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x03, 0xb1, 0x51, 0xf6, 0x4d, - 0x99, 0xf6, 0xc2, 0xfc, 0x91, 0x8d, 0x3d, 0x7d, 0x77, 0x84, 0x02, 0x8f, 0xe1, 0x42, 0x27, 0x80, - 0x5c, 0x8b, 0x05, 0xb7, 0xa9, 0xe5, 0x31, 0xa1, 0xeb, 0xb6, 0xd3, 0x21, 0xea, 0xc2, 0x7f, 0x29, - 0xdd, 0x89, 0x73, 0x8e, 0x58, 0xef, 0xad, 0x11, 0x69, 0x78, 0x8c, 0x06, 0xf4, 0x38, 0xcc, 0x52, - 0x62, 0x31, 0xdf, 0xab, 0xe6, 0xc5, 0xf2, 0xa3, 0xa8, 0x8c, 0x05, 0x14, 0x2b, 0x2c, 0x0f, 0x68, - 0x1d, 0xc2, 0x98, 0xd5, 0x0e, 0xc3, 0x6b, 0x14, 0xd0, 0x76, 0x24, 0x18, 0x87, 0x78, 0xf3, 0xb7, - 0x06, 0x54, 0x36, 0x29, 0xb1, 0x02, 0x32, 0x8d, 0x5b, 0x7c, 0xea, 0x13, 0x47, 0x1b, 0xb0, 0x20, - 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0xc8, 0x09, 0xe6, 0xcf, 0x2b, 0xe6, 0x85, 0xad, 0x24, - 0x1a, 0x0f, 0xd3, 0x9b, 0x3f, 0xc9, 0x42, 0xa5, 0x49, 0x5c, 0x12, 0x9b, 0xbc, 0x05, 0xa8, 0x4d, - 0xad, 0x16, 0xd9, 0x23, 0xd4, 0xf1, 0xed, 0x7d, 0xd2, 0xf2, 0x3d, 0x9b, 0x09, 0x37, 0xca, 0x36, - 0x3e, 0xc7, 0xf7, 0xf7, 0xe6, 0x08, 0x16, 0x8f, 0xe1, 0x40, 0x2e, 0x54, 0xba, 0x54, 0xfc, 0x16, - 0x7b, 0x2e, 0xbd, 0xac, 0x74, 0xf5, 0x2b, 0xe9, 0x8e, 0x74, 0x4f, 0x67, 0x6d, 0x2c, 0x9d, 0x0e, - 0x6a, 0x95, 0x04, 0x08, 0x27, 0x85, 0xa3, 0x6f, 0xc0, 0xa2, 0x4f, 0xbb, 0x47, 0x96, 0xd7, 0x24, - 0x5d, 0xe2, 0xd9, 0xc4, 0x0b, 0x98, 0xd8, 0xc8, 0x42, 0x63, 0x99, 0xd7, 0x22, 0xbb, 0x43, 0x38, - 0x3c, 0x42, 0x8d, 0x5e, 0x83, 0xa5, 0x2e, 0xf5, 0xbb, 0x56, 0x5b, 0x6c, 0xcc, 0x9e, 0xef, 0x3a, - 0xad, 0xbe, 0xda, 0xce, 0x27, 0x4f, 0x07, 0xb5, 0xa5, 0xbd, 0x61, 0xe4, 0xd9, 0xa0, 0x76, 0x41, - 0x6c, 0x1d, 0x87, 0xc4, 0x48, 0x3c, 0x2a, 0x46, 0x73, 0x83, 0xfc, 0x24, 0x37, 0x30, 0xb7, 0xa1, - 0xd0, 0xec, 0xa9, 0x3b, 0xf1, 0x02, 0x14, 0x6c, 0xf5, 0x5b, 0xed, 0x7c, 0x78, 0x39, 0x23, 0x9a, - 0xb3, 0x41, 0xad, 0xc2, 0xcb, 0xcf, 0x7a, 0x08, 0xc0, 0x11, 0x8b, 0xf9, 0x38, 0x14, 0xc4, 0xc1, - 0xb3, 0xbb, 0x57, 0xd0, 0x22, 0x64, 0xb1, 0x75, 0x4f, 0x48, 0x29, 0x63, 0xfe, 0x53, 0x8b, 0x62, - 0xbb, 0x00, 0x37, 0x49, 0x10, 0x1e, 0xfc, 0x06, 0x2c, 0x84, 0xa1, 0x3c, 0x99, 0x61, 0x22, 0x6f, - 0xc2, 0x49, 0x34, 0x1e, 0xa6, 0x37, 0x5f, 0x87, 0xa2, 0xc8, 0x42, 0x3c, 0x85, 0xc7, 0xe5, 0x82, - 0x71, 0x9f, 0x72, 0x21, 0xac, 0x01, 0x32, 0x93, 0x6a, 0x00, 0xcd, 0x5c, 0x17, 0x2a, 0x92, 0x37, - 0x2c, 0x90, 0x52, 0x69, 0x78, 0x12, 0x0a, 0xa1, 0x99, 0x4a, 0x4b, 0x54, 0x18, 0x87, 0x82, 0x70, - 0x44, 0xa1, 0x69, 0x3b, 0x82, 0x44, 0x46, 0x4d, 0xa7, 0x4c, 0xab, 0x7e, 0x32, 0xf7, 0xaf, 0x7e, - 0x34, 0x4d, 0x3f, 0x84, 0xea, 0xa4, 0x6a, 0xfa, 0x01, 0x72, 0x7e, 0x7a, 0x53, 0xcc, 0x77, 0x0c, - 0x58, 0xd4, 0x25, 0xa5, 0x3f, 0xbe, 0xf4, 0x4a, 0xce, 0xaf, 0xf6, 0xb4, 0x1d, 0xf9, 0x95, 0x01, - 0xcb, 0x89, 0xa5, 0x4d, 0x75, 0xe2, 0x53, 0x18, 0xa5, 0x3b, 0x47, 0x76, 0x0a, 0xe7, 0xf8, 0x6b, - 0x06, 0x2a, 0xb7, 0xac, 0x03, 0xe2, 0xee, 0x13, 0x97, 0xb4, 0x02, 0x9f, 0xa2, 0x1f, 0x40, 0xa9, - 0x63, 0x05, 0xad, 0x23, 0x01, 0x0d, 0x3b, 0x83, 0x66, 0xba, 0x60, 0x97, 0x90, 0x54, 0xdf, 0x89, - 0xc5, 0xdc, 0xf0, 0x02, 0xda, 0x6f, 0x5c, 0x50, 0x26, 0x95, 0x34, 0x0c, 0xd6, 0xb5, 0x89, 0x76, - 0x4e, 0x7c, 0xdf, 0x78, 0xab, 0xcb, 0xcb, 0x96, 0xe9, 0xbb, 0xc8, 0x84, 0x09, 0x98, 0xbc, 0xd9, - 0x73, 0x28, 0xe9, 
0x10, 0x2f, 0x88, 0xdb, 0xb9, 0x9d, 0x21, 0xf9, 0x78, 0x44, 0xe3, 0xca, 0x8b, - 0xb0, 0x38, 0x6c, 0x3c, 0x8f, 0x3f, 0xc7, 0xa4, 0x2f, 0xcf, 0x0b, 0xf3, 0x9f, 0x68, 0x19, 0xf2, - 0x27, 0x96, 0xdb, 0x53, 0xb7, 0x11, 0xcb, 0x8f, 0xeb, 0x99, 0x6b, 0x86, 0xf9, 0x1b, 0x03, 0xaa, - 0x93, 0x0c, 0x41, 0x5f, 0xd4, 0x04, 0x35, 0x4a, 0xca, 0xaa, 0xec, 0x2b, 0xa4, 0x2f, 0xa5, 0xde, - 0x80, 0x82, 0xdf, 0xe5, 0x35, 0x85, 0x4f, 0xd5, 0xa9, 0x3f, 0x11, 0x9e, 0xe4, 0xae, 0x82, 0x9f, - 0x0d, 0x6a, 0x17, 0x13, 0xe2, 0x43, 0x04, 0x8e, 0x58, 0x79, 0xa4, 0x16, 0xf6, 0xf0, 0xec, 0x11, - 0x45, 0xea, 0xbb, 0x02, 0x82, 0x15, 0xc6, 0xfc, 0x83, 0x01, 0x39, 0x51, 0x90, 0xbf, 0x0e, 0x05, - 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0xad, 0x20, 0xe7, 0xde, 0x21, 0x81, 0x15, 0x7b, - 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xef, 0x04, 0xa4, 0x13, 0x1e, 0xe4, 0x53, 0x13, 0x45, - 0xab, 0x87, 0x88, 0x3a, 0xb6, 0xee, 0xdd, 0x78, 0x2b, 0x20, 0x1e, 0x3f, 0x8c, 0xf8, 0x6a, 0x6c, - 0x73, 0x19, 0x58, 0x8a, 0x32, 0xff, 0x6d, 0x40, 0xa4, 0x8a, 0x3b, 0x3f, 0x23, 0xee, 0xe1, 0x2d, - 0xc7, 0x3b, 0x56, 0xdb, 0x1a, 0x99, 0xb3, 0xaf, 0xe0, 0x38, 0xa2, 0x18, 0x97, 0x1e, 0x32, 0xd3, - 0xa5, 0x07, 0xae, 0xb0, 0xe5, 0x7b, 0x81, 0xe3, 0xf5, 0x46, 0x6e, 0xdb, 0xa6, 0x82, 0xe3, 0x88, - 0x82, 0x17, 0x22, 0x94, 0x74, 0x2c, 0xc7, 0x73, 0xbc, 0x36, 0x5f, 0xc4, 0xa6, 0xdf, 0xf3, 0x02, - 0x91, 0x91, 0x55, 0x21, 0x82, 0x47, 0xb0, 0x78, 0x0c, 0x87, 0xf9, 0xfb, 0x1c, 0x94, 0xf8, 0x9a, - 0xc3, 0x3c, 0xf7, 0x3c, 0x54, 0x5c, 0xdd, 0x0b, 0xd4, 0xda, 0x2f, 0x2a, 0x53, 0x92, 0xf7, 0x1a, - 0x27, 0x69, 0x39, 0xb3, 0x28, 0xa1, 0x22, 0xe6, 0x4c, 0x92, 0x79, 0x4b, 0x47, 0xe2, 0x24, 0x2d, - 0x8f, 0x5e, 0xf7, 0xf8, 0xfd, 0x50, 0x95, 0x49, 0x74, 0x44, 0xdf, 0xe4, 0x40, 0x2c, 0x71, 0x68, - 0x07, 0x2e, 0x58, 0xae, 0xeb, 0xdf, 0x13, 0xc0, 0x86, 0xef, 0x1f, 0x77, 0x2c, 0x7a, 0xcc, 0x44, - 0x33, 0x5d, 0x68, 0x7c, 0x41, 0xb1, 0x5c, 0xd8, 0x18, 0x25, 0xc1, 0xe3, 0xf8, 0xc6, 0x1d, 0x5b, - 0x6e, 0xca, 0x63, 0x3b, 0x82, 0xe5, 0x21, 0x90, 0xb8, 0xe5, 0xaa, 0xb3, 0x7d, 0x46, 0xc9, 0x59, - 0xc6, 0x63, 0x68, 0xce, 0x26, 0xc0, 0xf1, 0x58, 0x89, 0xe8, 0x3a, 0xcc, 0x73, 0x4f, 0xf6, 0x7b, - 0x41, 0x58, 0x77, 0xe6, 0xc5, 0x71, 0xa3, 0xd3, 0x41, 0x6d, 0xfe, 0x76, 0x02, 0x83, 0x87, 0x28, - 0xf9, 0xe6, 0xba, 0x4e, 0xc7, 0x09, 0xaa, 0x73, 0x82, 0x25, 0xda, 0xdc, 0x5b, 0x1c, 0x88, 0x25, - 0x2e, 0xe1, 0x81, 0x85, 0xf3, 0x3c, 0xd0, 0xfc, 0x4b, 0x16, 0x90, 0xac, 0xb5, 0x6d, 0x59, 0x4f, - 0xc9, 0x90, 0xc6, 0x3b, 0x02, 0x55, 0xab, 0x1b, 0x43, 0x1d, 0x81, 0x2a, 0xd3, 0x43, 0x3c, 0xda, - 0x81, 0xa2, 0x0c, 0x2d, 0xf1, 0x75, 0x59, 0x57, 0xc4, 0xc5, 0xdd, 0x10, 0x71, 0x36, 0xa8, 0xad, - 0x24, 0xd4, 0x44, 0x18, 0xd1, 0xad, 0xc5, 0x12, 0xd0, 0x55, 0x00, 0xab, 0xeb, 0xe8, 0xef, 0x75, - 0xc5, 0xf8, 0xd5, 0x26, 0xee, 0xbc, 0xb1, 0x46, 0x85, 0x5e, 0x82, 0x5c, 0xf0, 0xe9, 0x3a, 0xaa, - 0x82, 0x68, 0x18, 0x79, 0xff, 0x24, 0x24, 0x70, 0xed, 0xc2, 0x9f, 0x19, 0x37, 0x4b, 0x35, 0x43, - 0x91, 0xf6, 0xad, 0x08, 0x83, 0x35, 0x2a, 0xf4, 0x2d, 0x28, 0x1c, 0xaa, 0x52, 0x54, 0x1c, 0x4c, - 0xea, 0x10, 0x19, 0x16, 0xb0, 0xf2, 0xc9, 0x20, 0xfc, 0xc2, 0x91, 0x34, 0xf4, 0x55, 0x28, 0xb1, - 0xde, 0x41, 0x94, 0xbd, 0xe5, 0x69, 0x46, 0xa9, 0x72, 0x3f, 0x46, 0x61, 0x9d, 0xce, 0x7c, 0x13, - 0x8a, 0x3b, 0x4e, 0x8b, 0xfa, 0xa2, 0x07, 0x7c, 0x02, 0xe6, 0x58, 0xa2, 0xc1, 0x89, 0x4e, 0x32, - 0xf4, 0xb2, 0x10, 0xcf, 0xdd, 0xcb, 0xb3, 0x3c, 0x5f, 0xb6, 0x31, 0xf9, 0xd8, 0xbd, 0x5e, 0xe5, - 0x40, 0x2c, 0x71, 0xd7, 0x97, 0x79, 0x81, 0xf0, 0xd3, 0xf7, 0x6b, 0x33, 0xef, 0xbe, 0x5f, 0x9b, - 0x79, 0xef, 0x7d, 0x55, 0x2c, 0xfc, 0x11, 
0x00, 0x76, 0x0f, 0xbe, 0x47, 0x5a, 0x32, 0xec, 0xa6, - 0x7a, 0xd6, 0x0b, 0x5f, 0x93, 0xc5, 0xb3, 0x5e, 0x66, 0xa8, 0xe8, 0xd3, 0x70, 0x38, 0x41, 0x89, - 0xd6, 0xa1, 0x18, 0x3d, 0xd8, 0x29, 0xff, 0x58, 0x0a, 0xfd, 0x2d, 0x7a, 0xd5, 0xc3, 0x31, 0x4d, - 0x22, 0x07, 0xe4, 0xce, 0xcd, 0x01, 0x0d, 0xc8, 0xf6, 0x1c, 0x5b, 0x35, 0xcc, 0x4f, 0x87, 0x39, - 0xf8, 0xce, 0x76, 0xf3, 0x6c, 0x50, 0x7b, 0x64, 0xd2, 0x3b, 0x79, 0xd0, 0xef, 0x12, 0x56, 0xbf, - 0xb3, 0xdd, 0xc4, 0x9c, 0x79, 0x5c, 0x40, 0x9a, 0x9d, 0x32, 0x20, 0x5d, 0x05, 0x68, 0xc7, 0xcf, - 0x0e, 0xf2, 0xbe, 0x47, 0x8e, 0xa8, 0x3d, 0x37, 0x68, 0x54, 0x88, 0xc1, 0x52, 0x8b, 0xb7, 0xe6, - 0xaa, 0xfd, 0x67, 0x81, 0xd5, 0x91, 0x0f, 0x99, 0xd3, 0xdd, 0x89, 0x4b, 0x4a, 0xcd, 0xd2, 0xe6, - 0xb0, 0x30, 0x3c, 0x2a, 0x1f, 0xf9, 0xb0, 0x64, 0xab, 0x0e, 0x31, 0x56, 0x5a, 0x9c, 0x5a, 0xe9, - 0x45, 0xae, 0xb0, 0x39, 0x2c, 0x08, 0x8f, 0xca, 0x46, 0xdf, 0x85, 0x95, 0x10, 0x38, 0xda, 0xa6, - 0x8b, 0x80, 0x9d, 0x6d, 0xac, 0x9e, 0x0e, 0x6a, 0x2b, 0xcd, 0x89, 0x54, 0xf8, 0x3e, 0x12, 0x90, - 0x0d, 0xb3, 0xae, 0x2c, 0x70, 0x4b, 0xa2, 0x28, 0xf9, 0x5a, 0xba, 0x55, 0xc4, 0xde, 0x5f, 0xd7, - 0x0b, 0xdb, 0xe8, 0xc9, 0x45, 0xd5, 0xb4, 0x4a, 0x36, 0x7a, 0x0b, 0x4a, 0x96, 0xe7, 0xf9, 0x81, - 0x25, 0x1f, 0x0e, 0xca, 0x42, 0xd5, 0xc6, 0xd4, 0xaa, 0x36, 0x62, 0x19, 0x43, 0x85, 0xb4, 0x86, - 0xc1, 0xba, 0x2a, 0x74, 0x0f, 0x16, 0xfc, 0x7b, 0x1e, 0xa1, 0x98, 0x1c, 0x12, 0x4a, 0xbc, 0x16, - 0x61, 0xd5, 0x8a, 0xd0, 0xfe, 0x4c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x87, - 0xb5, 0xa0, 0x3a, 0x8f, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe7, 0xe3, 0xb7, 0xe6, - 0xad, 0x08, 0x8a, 0x35, 0x0a, 0xd4, 0x83, 0x4a, 0x47, 0x4f, 0x19, 0xd5, 0x25, 0x61, 0xe6, 0xb5, - 0x74, 0x66, 0x8e, 0x26, 0xb5, 0xb8, 0x82, 0x49, 0xe0, 0x70, 0x52, 0xcb, 0xca, 0x73, 0x50, 0xfa, - 0x94, 0xc5, 0x3d, 0x6f, 0x0e, 0x86, 0x0f, 0x64, 0xaa, 0xe6, 0xe0, 0x4f, 0x19, 0x98, 0x4f, 0x6e, - 0xe3, 0x50, 0x3a, 0xcc, 0xa7, 0x4a, 0x87, 0x61, 0x1b, 0x6a, 0x4c, 0x1c, 0x3a, 0x84, 0xf1, 0x39, - 0x3b, 0x31, 0x3e, 0xab, 0x30, 0x98, 0x7b, 0x90, 0x30, 0x58, 0x07, 0xe0, 0x75, 0x06, 0xf5, 0x5d, - 0x97, 0x50, 0x11, 0x01, 0x0b, 0x6a, 0xb8, 0x10, 0x41, 0xb1, 0x46, 0xc1, 0xab, 0xe1, 0x03, 0xd7, - 0x6f, 0x1d, 0x8b, 0x2d, 0x08, 0x6f, 0xaf, 0x88, 0x7d, 0x05, 0x59, 0x0d, 0x37, 0x46, 0xb0, 0x78, - 0x0c, 0x87, 0xd9, 0x87, 0x8b, 0x7b, 0x16, 0x0d, 0x1c, 0xcb, 0x8d, 0x6f, 0x8a, 0x68, 0x37, 0xde, - 0x18, 0x69, 0x66, 0x9e, 0x9e, 0xf6, 0xc6, 0xc5, 0x9b, 0x1f, 0xc3, 0xe2, 0x86, 0xc6, 0xfc, 0x9b, - 0x01, 0x97, 0xc6, 0xea, 0xfe, 0x0c, 0x9a, 0xa9, 0x37, 0x92, 0xcd, 0xd4, 0xf3, 0x29, 0x5f, 0x21, - 0xc7, 0x59, 0x3b, 0xa1, 0xb5, 0x9a, 0x83, 0xfc, 0x1e, 0x2f, 0x62, 0xcd, 0x0f, 0x0d, 0x28, 0x8b, - 0x5f, 0xd3, 0x3c, 0x02, 0xd7, 0x92, 0xb3, 0x81, 0xe2, 0xc3, 0x9b, 0x0b, 0x3c, 0x8c, 0x57, 0xe2, - 0x77, 0x0c, 0x48, 0x3e, 0xbf, 0xa2, 0x17, 0xe5, 0x15, 0x30, 0xa2, 0xf7, 0xd1, 0x29, 0xdd, 0xff, - 0x85, 0x49, 0xdd, 0xe4, 0x85, 0x54, 0x0f, 0x8d, 0x4f, 0x42, 0x11, 0xfb, 0x7e, 0xb0, 0x67, 0x05, - 0x47, 0x8c, 0xef, 0x5d, 0x97, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, - 0x37, 0xe0, 0xd2, 0xc4, 0x91, 0x0f, 0x8f, 0x22, 0xad, 0xe8, 0x4b, 0xad, 0x28, 0x72, 0xe4, 0x98, - 0x0e, 0x6b, 0x54, 0xbc, 0x0d, 0x4c, 0xcc, 0x89, 0x86, 0xdb, 0xc0, 0x84, 0x36, 0x9c, 0xa4, 0x35, - 0xff, 0x95, 0x01, 0x35, 0x63, 0xf9, 0x1f, 0x3b, 0xfd, 0xe3, 0x43, 0x13, 0x9e, 0xf9, 0xe4, 0x84, - 0x27, 0x1a, 0xe7, 0x68, 0x23, 0x8e, 0xec, 0xfd, 0x47, 0x1c, 0xe8, 0xd9, 0x68, 0x6a, 0x22, 0x7d, - 0x68, 0x35, 0x39, 0x35, 0x39, 0x1b, 0xd4, 0xca, 0x4a, 0x78, 0x72, 
0x8a, 0xf2, 0x1a, 0xcc, 0xd9, - 0x24, 0xb0, 0x1c, 0x57, 0xb6, 0x74, 0xa9, 0xe7, 0x00, 0x52, 0x58, 0x53, 0xb2, 0x36, 0x4a, 0xdc, - 0x26, 0xf5, 0x81, 0x43, 0x81, 0x3c, 0x60, 0xb7, 0x7c, 0x5b, 0x76, 0x24, 0xf9, 0x38, 0x60, 0x6f, - 0xfa, 0x36, 0xc1, 0x02, 0x63, 0xbe, 0x6b, 0x40, 0x49, 0x4a, 0xda, 0xb4, 0x7a, 0x8c, 0xa0, 0x2b, - 0xd1, 0x2a, 0xe4, 0x71, 0x5f, 0xd2, 0xc7, 0x63, 0x67, 0x83, 0x5a, 0x51, 0x90, 0x89, 0x66, 0x66, - 0xcc, 0x18, 0x28, 0x73, 0xce, 0x1e, 0x3d, 0x0a, 0x79, 0x71, 0x81, 0xd4, 0x66, 0xc6, 0x73, 0x3e, - 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x9c, 0x81, 0x4a, 0x62, 0x71, 0x29, 0xfa, 0x82, 0xe8, 0xf5, 0x33, - 0x93, 0xe2, 0x45, 0x7d, 0xf2, 0x54, 0x5d, 0xa5, 0xaf, 0xd9, 0x07, 0x49, 0x5f, 0xdf, 0x86, 0xd9, - 0x16, 0xdf, 0xa3, 0xf0, 0x4f, 0x1a, 0x57, 0xa6, 0x39, 0x4e, 0xb1, 0xbb, 0xb1, 0x37, 0x8a, 0x4f, - 0x86, 0x95, 0x40, 0x74, 0x13, 0x96, 0x28, 0x09, 0x68, 0x7f, 0xe3, 0x30, 0x20, 0x54, 0x7f, 0x07, - 0xc8, 0xc7, 0xd5, 0x37, 0x1e, 0x26, 0xc0, 0xa3, 0x3c, 0xe6, 0x01, 0x94, 0x6f, 0x5b, 0x07, 0x6e, - 0x34, 0xd9, 0xc2, 0x50, 0x71, 0xbc, 0x96, 0xdb, 0xb3, 0x89, 0x0c, 0xe8, 0x61, 0xf4, 0x0a, 0x2f, - 0xed, 0xb6, 0x8e, 0x3c, 0x1b, 0xd4, 0x2e, 0x24, 0x00, 0x72, 0x94, 0x83, 0x93, 0x22, 0x4c, 0x17, - 0x72, 0x9f, 0x61, 0x27, 0xf9, 0x1d, 0x28, 0xc6, 0xb5, 0xfe, 0x43, 0x56, 0x69, 0xbe, 0x01, 0x05, - 0xee, 0xf1, 0x61, 0x8f, 0x7a, 0x4e, 0x95, 0x94, 0xac, 0xbd, 0x32, 0x69, 0x6a, 0x2f, 0x31, 0x1f, - 0xbd, 0xd3, 0xb5, 0x1f, 0x70, 0x3e, 0x9a, 0x79, 0x90, 0xcc, 0x97, 0x9d, 0x32, 0xf3, 0x5d, 0x05, - 0xf9, 0x1f, 0x12, 0x9e, 0x64, 0x64, 0x01, 0xa1, 0x25, 0x19, 0x3d, 0xff, 0x6b, 0xc3, 0x81, 0x1f, - 0x1b, 0x00, 0xe2, 0x15, 0xee, 0xc6, 0x09, 0xf1, 0x82, 0x14, 0x93, 0xf8, 0x3b, 0x30, 0xeb, 0x4b, - 0x8f, 0x94, 0x33, 0xd2, 0x29, 0x9f, 0x7a, 0xa3, 0x8b, 0x24, 0x7d, 0x12, 0x2b, 0x61, 0x8d, 0x97, - 0x3f, 0xf8, 0x64, 0x75, 0xe6, 0xc3, 0x4f, 0x56, 0x67, 0x3e, 0xfa, 0x64, 0x75, 0xe6, 0xed, 0xd3, - 0x55, 0xe3, 0x83, 0xd3, 0x55, 0xe3, 0xc3, 0xd3, 0x55, 0xe3, 0xa3, 0xd3, 0x55, 0xe3, 0xe3, 0xd3, - 0x55, 0xe3, 0xdd, 0x7f, 0xac, 0xce, 0xbc, 0xf6, 0x58, 0x9a, 0xff, 0xe6, 0xfd, 0x37, 0x00, 0x00, - 0xff, 0xff, 0x0b, 0x4d, 0x51, 0xc5, 0xdb, 0x27, 0x00, 0x00, + proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_a8431b6e0aeeb761) +} + +var fileDescriptor_a8431b6e0aeeb761 = []byte{ + // 2873 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57, + 0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa, + 0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44, + 0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49, + 0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f, + 0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84, + 0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7, + 0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e, + 0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26, + 0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03, + 0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5, + 0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf, + 0xf2, 0xd7, 
0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b, + 0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e, + 0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76, + 0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f, + 0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28, + 0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26, + 0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28, + 0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29, + 0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f, + 0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, + 0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d, + 0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86, + 0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5, + 0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, + 0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e, + 0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a, + 0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb, + 0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71, + 0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53, + 0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7, + 0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82, + 0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5, + 0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95, + 0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b, + 0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8, + 0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75, + 0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b, + 0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09, + 0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, + 0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3, + 0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3, + 0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74, + 0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81, + 0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2, + 0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22, + 0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1, + 0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 
0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60, + 0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6, + 0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1, + 0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82, + 0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8, + 0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, + 0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56, + 0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e, + 0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47, + 0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc, + 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79, + 0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13, + 0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77, + 0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a, + 0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e, + 0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45, + 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98, + 0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3, + 0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e, + 0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18, + 0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d, + 0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69, + 0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91, + 0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54, + 0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63, + 0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b, + 0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92, + 0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23, + 0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57, + 0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b, + 0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a, + 0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f, + 0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c, + 0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f, + 0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e, + 0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41, + 0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 
0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37, + 0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf, + 0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73, + 0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15, + 0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88, + 0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69, + 0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7, + 0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d, + 0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7, + 0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7, + 0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14, + 0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1, + 0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17, + 0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc, + 0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f, + 0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98, + 0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b, + 0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51, + 0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c, + 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a, + 0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3, + 0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e, + 0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd, + 0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf, + 0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b, + 0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65, + 0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6, + 0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5, + 0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59, + 0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc, + 0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17, + 0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6, + 0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75, + 0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43, + 0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86, + 0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c, + 0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 
0xab, 0x53, + 0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54, + 0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a, + 0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2, + 0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91, + 0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77, + 0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4, + 0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1, + 0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0, + 0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8, + 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8, + 0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30, + 0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a, + 0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39, + 0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb, + 0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95, + 0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0, + 0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04, + 0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66, + 0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d, + 0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0, + 0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1, + 0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09, + 0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd, + 0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70, + 0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a, + 0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2, + 0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5, + 0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9, + 0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80, + 0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a, + 0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef, + 0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2, + 0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8, + 0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06, + 0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f, + 0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a, + 0xf2, 0x85, 
0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81, + 0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52, + 0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f, + 0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad, + 0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78, + 0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03, + 0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52, + 0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a, + 0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19, + 0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90, + 0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8, + 0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda, + 0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98, + 0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73, + 0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0, + 0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82, + 0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc, + 0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f, + 0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f, + 0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31, + 0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31, + 0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00, } func (m *APIGroup) Marshal() (dAtA []byte, err error) { @@ -2024,6 +2055,48 @@ func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *FieldSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2503,6 +2576,16 @@ func (m 
*ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SendInitialEvents != nil { + i-- + if *m.SendInitialEvents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } i -= len(m.ResourceVersionMatch) copy(dAtA[i:], m.ResourceVersionMatch) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersionMatch))) @@ -3703,6 +3786,25 @@ func (m *Duration) Size() (n int) { return n } +func (m *FieldSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *FieldsV1) Size() (n int) { if m == nil { return 0 @@ -3908,6 +4010,9 @@ func (m *ListOptions) Size() (n int) { n += 2 l = len(m.ResourceVersionMatch) n += 1 + l + sovGenerated(uint64(l)) + if m.SendInitialEvents != nil { + n += 2 + } return n } @@ -4415,6 +4520,18 @@ func (this *Duration) String() string { }, "") return s } +func (this *FieldSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} func (this *GetOptions) String() string { if this == nil { return "nil" @@ -4517,6 +4634,7 @@ func (this *ListOptions) String() string { `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `ResourceVersionMatch:` + fmt.Sprintf("%v", this.ResourceVersionMatch) + `,`, + `SendInitialEvents:` + valueToStringGenerated(this.SendInitialEvents) + `,`, `}`, }, "") return s @@ -6428,6 +6546,152 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } return nil } +func (m *FieldSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = FieldSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8250,6 +8514,27 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } m.ResourceVersionMatch = ResourceVersionMatch(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendInitialEvents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.SendInitialEvents = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 2be188a6..18dd0b06 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -34,6 +34,7 @@ message APIGroup { optional string name = 1; // versions are the versions supported in this group. + // +listType=atomic repeated GroupVersionForDiscovery versions = 2; // preferredVersion is the version preferred by the API server, which @@ -49,6 +50,7 @@ message APIGroup { // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. // +optional + // +listType=atomic repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; } @@ -56,6 +58,7 @@ message APIGroup { // /apis. message APIGroupList { // groups is a list of APIGroup. 
+ // +listType=atomic repeated APIGroup groups = 1; } @@ -88,9 +91,11 @@ message APIResource { optional Verbs verbs = 4; // shortNames is a list of suggested short names of the resource. + // +listType=atomic repeated string shortNames = 5; // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + // +listType=atomic repeated string categories = 7; // The hash value of the storage version, the version this resource is @@ -112,6 +117,7 @@ message APIResourceList { optional string groupVersion = 1; // resources contains the name of the resources and if they are namespaced. + // +listType=atomic repeated APIResource resources = 2; } @@ -122,6 +128,7 @@ message APIResourceList { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { // versions are the api versions that are available. + // +listType=atomic repeated string versions = 1; // a map of client CIDR to server address that is serving this group. @@ -131,6 +138,7 @@ message APIVersions { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +listType=atomic repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; } @@ -145,6 +153,7 @@ message ApplyOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // Force is going to "force" Apply requests. It means user will @@ -235,6 +244,7 @@ message CreateOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // fieldManager is a name associated with the actor or entity @@ -246,19 +256,16 @@ message CreateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -306,6 +313,7 @@ message DeleteOptions { // request. 
Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 5; } @@ -316,6 +324,25 @@ message Duration { optional int64 duration = 1; } +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message FieldSelectorRequirement { + // key is the field selector key that the requirement applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + optional string operator = 2; + + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + repeated string values = 3; +} + // FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. // // Each key is either a '.' representing the field itself, and will always map to an empty set, @@ -421,6 +448,7 @@ message LabelSelector { // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +optional + // +listType=atomic repeated LabelSelectorRequirement matchExpressions = 2; } @@ -428,8 +456,6 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. @@ -441,6 +467,7 @@ message LabelSelectorRequirement { // the values array must be empty. This array is replaced during a strategic // merge patch. // +optional + // +listType=atomic repeated string values = 3; } @@ -452,7 +479,7 @@ message List { optional ListMeta metadata = 1; // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; + repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; } // ListMeta describes metadata that synthetic resources must have, including lists and @@ -575,6 +602,32 @@ message ListOptions { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
+ // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + optional bool sendInitialEvents = 11; } // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource @@ -645,7 +698,7 @@ message ObjectMeta { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional optional string name = 1; @@ -671,7 +724,7 @@ message ObjectMeta { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional optional string namespace = 3; @@ -685,7 +738,7 @@ message ObjectMeta { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 5; @@ -749,14 +802,14 @@ message ObjectMeta { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional map labels = 11; // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional map annotations = 12; @@ -767,6 +820,8 @@ message ObjectMeta { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid repeated OwnerReference ownerReferences = 13; // Must be empty before the object is deleted from the registry. Each entry @@ -784,6 +839,7 @@ message ObjectMeta { // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge + // +listType=set repeated string finalizers = 14; // ManagedFields maps workflow-id and version to the set of fields @@ -795,6 +851,7 @@ message ObjectMeta { // workflow used when modifying the object. // // +optional + // +listType=atomic repeated ManagedFieldsEntry managedFields = 17; } @@ -811,11 +868,11 @@ message OwnerReference { optional string kind = 1; // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names optional string name = 3; // UID of the referent. 
- // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids optional string uid = 4; // If true, this reference points to the managing controller. @@ -869,6 +926,7 @@ message PatchOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // Force is going to "force" Apply requests. It means user will @@ -889,19 +947,16 @@ message PatchOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -925,6 +980,7 @@ message Preconditions { // For example: "/healthz", "/apis". message RootPaths { // paths are the paths available at root. + // +listType=atomic repeated string paths = 1; } @@ -967,6 +1023,7 @@ message Status { // is not guaranteed to conform to any schema except that defined by // the reason type. // +optional + // +listType=atomic optional StatusDetails details = 5; // Suggested HTTP return code for this status, 0 if not set. @@ -1024,13 +1081,14 @@ message StatusDetails { // UID of the resource. // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional optional string uid = 6; // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional + // +listType=atomic repeated StatusCause causes = 4; // If specified, the time in seconds before the operation should be retried. Some errors may indicate @@ -1117,6 +1175,7 @@ message UpdateOptions { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic repeated string dryRun = 1; // fieldManager is a name associated with the actor or entity @@ -1128,19 +1187,16 @@ message UpdateOptions { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. 
Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -1172,6 +1228,6 @@ message WatchEvent { // * If Type is Deleted: the state of the object immediately before deletion. // * If Type is Error: *Status is recommended; other types may make sense // depending on context. - optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; + optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index 592dcb8a..c748071e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" + utiljson "k8s.io/apimachinery/pkg/util/json" ) // LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements @@ -280,13 +282,20 @@ func (f FieldsV1) MarshalJSON() ([]byte, error) { if f.Raw == nil { return []byte("null"), nil } + if f.getContentType() == fieldsV1InvalidOrValidCBORObject { + var u map[string]interface{} + if err := cbor.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 cbor invalid: %w", err) + } + return utiljson.Marshal(u) + } return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler func (f *FieldsV1) UnmarshalJSON(b []byte) error { if f == nil { - return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + return errors.New("metav1.FieldsV1: UnmarshalJSON on nil pointer") } if !bytes.Equal(b, []byte("null")) { f.Raw = append(f.Raw[0:0], b...) @@ -296,3 +305,75 @@ func (f *FieldsV1) UnmarshalJSON(b []byte) error { var _ json.Marshaler = FieldsV1{} var _ json.Unmarshaler = &FieldsV1{} + +func (f FieldsV1) MarshalCBOR() ([]byte, error) { + if f.Raw == nil { + return cbor.Marshal(nil) + } + if f.getContentType() == fieldsV1InvalidOrValidJSONObject { + var u map[string]interface{} + if err := utiljson.Unmarshal(f.Raw, &u); err != nil { + return nil, fmt.Errorf("metav1.FieldsV1 json invalid: %w", err) + } + return cbor.Marshal(u) + } + return f.Raw, nil +} + +var cborNull = []byte{0xf6} + +func (f *FieldsV1) UnmarshalCBOR(b []byte) error { + if f == nil { + return errors.New("metav1.FieldsV1: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(b, cborNull) { + f.Raw = append(f.Raw[0:0], b...) 
+ } + return nil +} + +const ( + // fieldsV1InvalidOrEmpty indicates that a FieldsV1 either contains no raw bytes or its raw + // bytes don't represent an allowable value in any supported encoding. + fieldsV1InvalidOrEmpty = iota + + // fieldsV1InvalidOrValidJSONObject indicates that a FieldV1 either contains raw bytes that + // are a valid JSON encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidJSONObject + + // fieldsV1InvalidOrValidCBORObject indicates that a FieldV1 either contains raw bytes that + // are a valid CBOR encoding of an allowable value or don't represent an allowable value in + // any supported encoding. + fieldsV1InvalidOrValidCBORObject +) + +// getContentType returns one of fieldsV1InvalidOrEmpty, fieldsV1InvalidOrValidJSONObject, +// fieldsV1InvalidOrValidCBORObject based on the value of Raw. +// +// Raw can be encoded in JSON or CBOR and is only valid if it is empty, null, or an object (map) +// value. It is invalid if it contains a JSON string, number, boolean, or array. If Raw is nonempty +// and represents an allowable value, then the initial byte unambiguously distinguishes a +// JSON-encoded value from a CBOR-encoded value. +// +// A valid JSON-encoded value can begin with any of the four JSON whitespace characters, the first +// character 'n' of null, or '{' (0x09, 0x0a, 0x0d, 0x20, 0x6e, or 0x7b, respectively). A valid +// CBOR-encoded value can begin with the null simple value, an initial byte with major type "map", +// or, if a tag-enclosed map, an initial byte with major type "tag" (0xf6, 0xa0...0xbf, or +// 0xc6...0xdb). The two sets of valid initial bytes don't intersect. +func (f FieldsV1) getContentType() int { + if len(f.Raw) > 0 { + p := f.Raw[0] + switch p { + case 'n', '{', '\t', '\r', '\n', ' ': + return fieldsV1InvalidOrValidJSONObject + case 0xf6: // null + return fieldsV1InvalidOrValidCBORObject + default: + if p >= 0xa0 && p <= 0xbf /* map */ || p >= 0xc6 && p <= 0xdb /* tag */ { + return fieldsV1InvalidOrValidCBORObject + } + } + } + return fieldsV1InvalidOrEmpty +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 8eb37f43..9f302b3f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" @@ -129,6 +131,25 @@ func (t *MicroTime) UnmarshalJSON(b []byte) error { return nil } +func (t *MicroTime) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(RFC3339Micro, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *MicroTime) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -160,6 +181,13 @@ func (t MicroTime) MarshalJSON() ([]byte, error) { return json.Marshal(t.UTC().Format(RFC3339Micro)) } +func (t MicroTime) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + return cbor.Marshal(t.UTC().Format(RFC3339Micro)) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this 
type. // diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 421770d4..0333cfdb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -19,6 +19,8 @@ package v1 import ( "encoding/json" "time" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" ) // Time is a wrapper around time.Time which supports correct @@ -116,6 +118,25 @@ func (t *Time) UnmarshalJSON(b []byte) error { return nil } +func (t *Time) UnmarshalCBOR(b []byte) error { + var s *string + if err := cbor.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + t.Time = time.Time{} + return nil + } + + parsed, err := time.Parse(time.RFC3339, *s) + if err != nil { + return err + } + + t.Time = parsed.Local() + return nil +} + // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *Time) UnmarshalQueryParameter(str string) error { if len(str) == 0 { @@ -151,6 +172,14 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +func (t Time) MarshalCBOR() ([]byte, error) { + if t.IsZero() { + return cbor.Marshal(nil) + } + + return cbor.Marshal(t.UTC().Format(time.RFC3339)) +} + // ToUnstructured implements the value.UnstructuredConverter interface. func (t Time) ToUnstructured() interface{} { if t.IsZero() { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 152f9929..473adb9e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -114,7 +114,7 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -140,7 +140,7 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces // +optional Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` @@ -154,7 +154,7 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` @@ -218,14 +218,14 @@ type ObjectMeta struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://kubernetes.io/docs/user-guide/labels + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels // +optional Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. 
- // More info: http://kubernetes.io/docs/user-guide/annotations + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations // +optional Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` @@ -236,6 +236,8 @@ type ObjectMeta struct { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` // Must be empty before the object is deleted from the registry. Each entry @@ -253,6 +255,7 @@ type ObjectMeta struct { // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge + // +listType=set Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` // Tombstone: ClusterName was a legacy field that was always cleared by @@ -268,6 +271,7 @@ type ObjectMeta struct { // workflow used when modifying the object. // // +optional + // +listType=atomic ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } @@ -295,10 +299,10 @@ type OwnerReference struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // If true, this reference points to the managing controller. // +optional @@ -400,8 +404,43 @@ type ListOptions struct { // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` + + // `sendInitialEvents=true` may be set together with `watch=true`. + // In that case, the watch stream will begin with synthetic events to + // produce the current state of objects in the collection. Once all such + // events have been sent, a synthetic "Bookmark" event will be sent. + // The bookmark will report the ResourceVersion (RV) corresponding to the + // set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation. + // Afterwards, the watch stream will proceed as usual, sending watch events + // corresponding to changes (subsequent to the RV) to objects watched. + // + // When `sendInitialEvents` option is set, we require `resourceVersionMatch` + // option to also be set. The semantic of the watch request is as following: + // - `resourceVersionMatch` = NotOlderThan + // is interpreted as "data at least as new as the provided `resourceVersion`" + // and the bookmark event is send when the state is synced + // to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
+ // If `resourceVersion` is unset, this is interpreted as "consistent read" and the + // bookmark event is send when the state is synced at least to the moment + // when request started being processed. + // - `resourceVersionMatch` set to any other value or unset + // Invalid error is returned. + // + // Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward + // compatibility reasons) and to false otherwise. + // +optional + SendInitialEvents *bool `json:"sendInitialEvents,omitempty" protobuf:"varint,11,opt,name=sendInitialEvents"` } +const ( + // InitialEventsAnnotationKey the name of the key + // under which an annotation marking the end of + // a watchlist stream is stored. + // + // The annotation is added to a "Bookmark" event. + InitialEventsAnnotationKey = "k8s.io/initial-events-end" +) + // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch // may only be set if resourceVersion is also set. // @@ -505,6 +544,7 @@ type DeleteOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } @@ -530,6 +570,7 @@ type CreateOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // +k8s:deprecated=includeUninitialized,protobuf=2 @@ -542,19 +583,16 @@ type CreateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -577,6 +615,7 @@ type PatchOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // Force is going to "force" Apply requests. It means user will @@ -597,19 +636,16 @@ type PatchOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. 
Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -631,6 +667,7 @@ type ApplyOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // Force is going to "force" Apply requests. It means user will @@ -663,6 +700,7 @@ type UpdateOptions struct { // request. Valid values are: // - All: all dry run stages will be processed // +optional + // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` // fieldManager is a name associated with the actor or entity @@ -674,19 +712,16 @@ type UpdateOptions struct { // fieldValidation instructs the server on how to handle // objects in the request (POST/PUT/PATCH) containing unknown - // or duplicate fields, provided that the `ServerSideFieldValidation` - // feature gate is also enabled. Valid values are: + // or duplicate fields. Valid values are: // - Ignore: This will ignore any unknown fields that are silently // dropped from the object, and will ignore all but the last duplicate // field that the decoder encounters. This is the default behavior - // prior to v1.23 and is the default behavior when the - // `ServerSideFieldValidation` feature gate is disabled. + // prior to v1.23. // - Warn: This will send a warning via the standard warning response // header for each unknown field that is dropped from the object, and // for each duplicate field that is encountered. The request will // still succeed if there are no other errors, and will only persist - // the last of any duplicate fields. This is the default when the - // `ServerSideFieldValidation` feature gate is enabled. + // the last of any duplicate fields. This is the default in v1.23+ // - Strict: This will fail the request with a BadRequest error if // any unknown fields would be dropped from the object, or if any // duplicate fields are present. The error returned from the server @@ -734,6 +769,7 @@ type Status struct { // is not guaranteed to conform to any schema except that defined by // the reason type. // +optional + // +listType=atomic Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` // Suggested HTTP return code for this status, 0 if not set. // +optional @@ -761,12 +797,13 @@ type StatusDetails struct { Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. // (when there is a single resource which can be described). 
- // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids // +optional UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. // +optional + // +listType=atomic Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` // If specified, the time in seconds before the operation should be retried. Some errors may indicate // the client must take an alternate action - for those errors this field may indicate how long to wait @@ -978,6 +1015,24 @@ const ( // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) // values that can not be handled (e.g. an enumerated string). CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + CauseTypeForbidden CauseType = "FieldValueForbidden" + // CauseTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + CauseTypeTooLong CauseType = "FieldValueTooLong" + // CauseTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + CauseTypeTooMany CauseType = "FieldValueTooMany" + // CauseTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + CauseTypeInternal CauseType = "InternalError" + // CauseTypeTypeInvalid is for the value did not match the schema type for that field + CauseTypeTypeInvalid CauseType = "FieldValueTypeInvalid" // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client // without the expected return type. The presence of this cause indicates the error may be // due to an intervening proxy or the server software malfunctioning. @@ -1012,6 +1067,7 @@ type List struct { type APIVersions struct { TypeMeta `json:",inline"` // versions are the api versions that are available. + // +listType=atomic Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` // a map of client CIDR to server address that is serving this group. // This is to help clients reach servers in the most network-efficient way possible. @@ -1020,6 +1076,7 @@ type APIVersions struct { // The server returns only those CIDRs that it thinks that the client can match. // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +listType=atomic ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` } @@ -1030,6 +1087,7 @@ type APIVersions struct { type APIGroupList struct { TypeMeta `json:",inline"` // groups is a list of APIGroup. 
+ // +listType=atomic Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` } @@ -1042,6 +1100,7 @@ type APIGroup struct { // name is the name of the group. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // versions are the versions supported in this group. + // +listType=atomic Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` // preferredVersion is the version preferred by the API server, which // probably is the storage version. @@ -1055,6 +1114,7 @@ type APIGroup struct { // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. // +optional + // +listType=atomic ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` } @@ -1099,8 +1159,10 @@ type APIResource struct { // update, patch, delete, deletecollection, and proxy) Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` // shortNames is a list of suggested short names of the resource. + // +listType=atomic ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + // +listType=atomic Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` // The hash value of the storage version, the version this resource is // converted to when written to the data store. Value must be treated @@ -1133,6 +1195,7 @@ type APIResourceList struct { // groupVersion is the group and version this APIResourceList is for. GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` // resources contains the name of the resources and if they are namespaced. + // +listType=atomic APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` } @@ -1140,6 +1203,7 @@ type APIResourceList struct { // For example: "/healthz", "/apis". type RootPaths struct { // paths are the paths available at root. + // +listType=atomic Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` } @@ -1183,6 +1247,7 @@ type LabelSelector struct { MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +optional + // +listType=atomic MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` } @@ -1190,9 +1255,7 @@ type LabelSelector struct { // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` @@ -1201,6 +1264,7 @@ type LabelSelectorRequirement struct { // the values array must be empty. This array is replaced during a strategic // merge patch. 
// +optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } @@ -1214,6 +1278,33 @@ const ( LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) +// FieldSelectorRequirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type FieldSelectorRequirement struct { + // key is the field selector key that the requirement applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + // The list of operators may grow in the future. + Operator FieldSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=FieldSelectorOperator"` + // values is an array of string values. + // If the operator is In or NotIn, the values array must be non-empty. + // If the operator is Exists or DoesNotExist, the values array must be empty. + // +optional + // +listType=atomic + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A field selector operator is the set of operators that can be used in a selector requirement. +type FieldSelectorOperator string + +const ( + FieldSelectorOpIn FieldSelectorOperator = "In" + FieldSelectorOpNotIn FieldSelectorOperator = "NotIn" + FieldSelectorOpExists FieldSelectorOperator = "Exists" + FieldSelectorOpDoesNotExist FieldSelectorOperator = "DoesNotExist" +) + // ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource // that the fieldset applies to. type ManagedFieldsEntry struct { @@ -1302,8 +1393,10 @@ type Table struct { // columnDefinitions describes each column in the returned items array. The number of cells per row // will always match the number of column definitions. + // +listType=atomic ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` // rows is the list of items in the table. + // +listType=atomic Rows []TableRow `json:"rows"` } @@ -1336,12 +1429,14 @@ type TableRow struct { // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a // more detailed description. + // +listType=atomic Cells []interface{} `json:"cells"` // conditions describe additional status of a row that are relevant for a human user. These conditions // apply to the row, not to the object, and will be specific to table output. The only defined // condition type is 'Completed', for a row that indicates a resource that has run to completion and // can be given less visual priority. // +optional + // +listType=atomic Conditions []TableRowCondition `json:"conditions,omitempty"` // This field contains the requested additional information about each object based on the includeObject // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 9570726a..1fa37215 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_APIGroup = map[string]string{ @@ -115,7 +115,7 @@ var map_CreateOptions = map[string]string{ "": "CreateOptions may be provided when creating an API object.", "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -135,6 +135,17 @@ func (DeleteOptions) SwaggerDoc() map[string]string { return map_DeleteOptions } +var map_FieldSelectorRequirement = map[string]string{ + "": "FieldSelectorRequirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the field selector key that the requirement applies to.", + "operator": "operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. The list of operators may grow in the future.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.", +} + +func (FieldSelectorRequirement) SwaggerDoc() map[string]string { + return map_FieldSelectorRequirement +} + var map_FieldsV1 = map[string]string{ "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } @@ -216,6 +227,7 @@ var map_ListOptions = map[string]string{ "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "sendInitialEvents": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -239,18 +251,18 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", "selfLink": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. 
The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", @@ -264,8 +276,8 @@ var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "controller": "If true, this reference points to the managing controller.", "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", } @@ -306,7 +318,7 @@ var map_PatchOptions = map[string]string{ "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. 
Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", } func (PatchOptions) SwaggerDoc() map[string]string { @@ -372,7 +384,7 @@ var map_StatusDetails = map[string]string{ "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", } @@ -451,7 +463,7 @@ var map_UpdateOptions = map[string]string{ "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", "dryRun": "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "fieldValidation": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 2e33283e..0f58d66c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -173,7 +173,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) } } return strMap, true, nil diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index a499eee8..40d289f3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -101,6 +101,11 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { return nil } +func (obj *Unstructured) EachListItemWithAlloc(fn func(runtime.Object) error) error { + // EachListItem has allocated a new Object for the user, we can use it directly. + return obj.EachListItem(fn) +} + func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { return make(map[string]interface{}) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index 5028f5fb..82beda2a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,6 +52,15 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } +func (u *UnstructuredList) EachListItemWithAlloc(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&Unstructured{Object: u.Items[i].Object}); err != nil { + return err + } + } + return nil +} + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index a0f709ad..3eba5ba5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -32,6 +32,10 @@ import ( type LabelSelectorValidationOptions struct { // Allow invalid label value in selector AllowInvalidLabelValueInSelector bool + + // Allows an operator that is not interpretable to pass validation. 
This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool } // LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. @@ -79,7 +83,9 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) } default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) + } } allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) if !opts.AllowInvalidLabelValueInSelector { @@ -113,6 +119,39 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi return allErrs } +// FieldSelectorValidationOptions is a struct that can be passed to ValidateFieldSelectorRequirement to record the validate options +type FieldSelectorValidationOptions struct { + // Allows an operator that is not interpretable to pass validation. This is useful for cases where a broader check + // can be performed, as in a *SubjectAccessReview + AllowUnknownOperatorInRequirement bool +} + +// ValidateLabelSelectorRequirement validates the requirement according to the opts and returns any validation errors. +func ValidateFieldSelectorRequirement(requirement metav1.FieldSelectorRequirement, opts FieldSelectorValidationOptions, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(requirement.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "must be specified")) + } + + switch requirement.Operator { + case metav1.FieldSelectorOpIn, metav1.FieldSelectorOpNotIn: + if len(requirement.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case metav1.FieldSelectorOpExists, metav1.FieldSelectorOpDoesNotExist: + if len(requirement.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + default: + if !opts.AllowUnknownOperatorInRequirement { + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), requirement.Operator, "not a valid selector operator")) + } + } + + return allErrs +} + func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { allErrs := field.ErrorList{} //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go index b7590f0b..afe01ed5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -426,6 +426,13 @@ func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, } else { out.Continue = "" } + if values, ok := map[string][]string(*in)["sendInitialEvents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.SendInitialEvents, s); err != nil { + return err + } + } else { + out.SendInitialEvents = nil + } return 
nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 418e6099..90cc54a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -327,6 +327,27 @@ func (in *Duration) DeepCopy() *Duration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldSelectorRequirement) DeepCopyInto(out *FieldSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldSelectorRequirement. +func (in *FieldSelectorRequirement) DeepCopy() *FieldSelectorRequirement { + if in == nil { + return nil + } + out := new(FieldSelectorRequirement) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in @@ -602,6 +623,11 @@ func (in *ListOptions) DeepCopyInto(out *ListOptions) { *out = new(int64) **out = **in } + if in.SendInitialEvents != nil { + in, out := &in.SendInitialEvents, &out.SendInitialEvents + *out = new(bool) + **out = **in + } return } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go index a2abc67c..819d936f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
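Editor's note on the sendInitialEvents support added above (the URL-parameter conversion and the SendInitialEvents deep-copy): together they let a client ask the server to replay the current state of a collection as synthetic watch events before switching to a normal watch. The following is only a rough sketch of how a consumer might opt in; it assumes a client-go Pods client named `client`, and whether the request is actually honored depends on the API server supporting the WatchList feature.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchListPods opens a watch that first streams the current state of the
// collection, then a bookmark, then ordinary change events.
func watchListPods(ctx context.Context, client kubernetes.Interface) error {
	sendInitialEvents := true
	w, err := client.CoreV1().Pods("default").Watch(ctx, metav1.ListOptions{
		Watch:                true,
		SendInitialEvents:    &sendInitialEvents,
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		AllowWatchBookmarks:  true,
		// Leaving ResourceVersion empty asks for a consistent read; the
		// bookmark event marks the point where the initial state is complete.
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("event: %s\n", ev.Type)
	}
	return nil
}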
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +// source: k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto package v1beta1 @@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } func (*PartialObjectMetadataList) ProtoMessage() {} func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_90ec10f86b91f9a8, []int{0} + return fileDescriptor_39237a8d8061b52f, []int{0} } func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -77,31 +77,30 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) + proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_39237a8d8061b52f) } -var fileDescriptor_90ec10f86b91f9a8 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto +var fileDescriptor_39237a8d8061b52f = []byte{ + // 303 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0xf3, 0x30, 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x30, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xdb, 0xc1, - 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x7f, 0xbb, 0x58, - 0xd3, 0x94, 0xe4, 0xdf, 0x81, 0x37, 0x3f, 0x82, 0x1f, 0x6b, 0xc7, 0x1d, 0x07, 0xc2, 0x70, 0xf5, - 0x8b, 0x48, 0xda, 0x2a, 0x32, 0x14, 0x7a, 0xeb, 0xf3, 0x94, 0xdf, 0x2f, 0x4f, 0x20, 0xfe, 0x2c, - 0x3e, 0xb7, 0x4c, 0x6a, 0x1e, 0x67, 0x01, 0x98, 0x04, 0x10, 0x2c, 0x5f, 0x42, 0x32, 0xd7, 0x86, - 0x57, 0x3f, 0x44, 0x2a, 0x95, 0x08, 0x17, 0x32, 0x01, 0xf3, 0xcc, 0xd3, 0x38, 0x72, 0x85, 0xe5, - 0x0a, 0x50, 0xf0, 0xe5, 0x28, 0x00, 0x14, 0x23, 0x1e, 0x41, 0x02, 0x46, 0x20, 0xcc, 0x59, 0x6a, - 0x34, 0xea, 0xf6, 0xb0, 0x44, 0xd9, 0x4f, 0x94, 0xa5, 0x71, 0xe4, 0x0a, 0xcb, 0x1c, 0xca, 0x2a, - 0xb4, 0x7b, 0x12, 0x49, 0x5c, 0x64, 0x01, 0x0b, 0xb5, 0xe2, 0x91, 0x8e, 0x34, 0x2f, 0x0c, 0x41, - 0xf6, 0x50, 0xa4, 0x22, 0x14, 0x5f, 0xa5, 0xb9, 0x7b, 0x5a, 0x67, 0xd4, 0xfe, 0x9e, 0xee, 0xd9, - 0x5f, 0x94, 0xc9, 0x12, 0x94, 0x0a, 0xb8, 0x0d, 0x17, 0xa0, 0xc4, 0x3e, 0x77, 0xfc, 0x46, 0xfc, - 0xa3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0x69, 0xf0, 0x08, 0x21, 0x5e, 0x03, 0x8a, 0xb9, 0x40, 0x71, - 0x25, 0x2d, 0xb6, 0xef, 0xfc, 0xa6, 0xaa, 0x72, 0xe7, 0x5f, 0x9f, 0x0c, 0x5a, 0x63, 0xc6, 0xea, - 0x5c, 0x9c, 0x39, 0xda, 0x99, 0x26, 0x87, 0xab, 0x6d, 0xcf, 0xcb, 0xb7, 0xbd, 0xe6, 0x57, 0x33, - 0xfb, 0x36, 0xb6, 0xef, 0xfd, 0x86, 0x44, 0x50, 0xb6, 0x43, 0xfa, 0xff, 0x07, 0xad, 0xf1, 0x45, - 0x3d, 0xf5, 0xaf, 0x6b, 0x27, 0x07, 0xd5, 0x39, 0x8d, 0x4b, 0x67, 0x9c, 0x95, 0xe2, 0xc9, 0x74, - 0xb5, 0xa3, 0xde, 0x7a, 0x47, 0xbd, 0xcd, 0x8e, 0x7a, 0x2f, 0x39, 0x25, 0xab, 0x9c, 0x92, 0x75, - 0x4e, 0xc9, 0x26, 0xa7, 0xe4, 0x3d, 0xa7, 0xe4, 0xf5, 0x83, 0x7a, 0xb7, 0xc3, 0xda, 0xcf, 0xe0, - 0x33, 0x00, 0x00, 0xff, 0xff, 0x30, 0x97, 0x8b, 0x11, 0x4b, 0x02, 0x00, 0x00, + 0x84, 0x0d, 0x11, 0xc5, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x31, 0x8b, 0x35, + 0x4d, 0x69, 0xfe, 0x15, 0xbc, 0xf9, 0x11, 0xfc, 0x58, 0x3d, 0xee, 0x38, 0x10, 0x86, 0x8d, 0x5f, + 0x44, 0xd2, 0x56, 0x91, 0xa1, 0xd0, 0x5b, 0x9e, 0x07, 0x7e, 0xbf, 0x3c, 0x81, 0xf8, 0x67, 0xd1, + 0xa9, 0x21, 0x52, 0x53, 0x96, 0x48, 0xc5, 0xc2, 0x95, 0x8c, 0x79, 0xfa, 0x4c, 0x93, 0x48, 0xb8, + 0xc2, 0x50, 0xc5, 0x81, 0xd1, 0xa7, 0x49, 0xc0, 0x81, 
0x4d, 0xa8, 0xe0, 0x31, 0x4f, 0x19, 0xf0, + 0x25, 0x49, 0x52, 0x0d, 0xba, 0x3b, 0xae, 0x50, 0xf2, 0x13, 0x25, 0x49, 0x24, 0x5c, 0x61, 0x88, + 0x43, 0x49, 0x8d, 0xf6, 0x8f, 0x84, 0x84, 0x55, 0x16, 0x90, 0x50, 0x2b, 0x2a, 0xb4, 0xd0, 0xb4, + 0x34, 0x04, 0xd9, 0x7d, 0x99, 0xca, 0x50, 0x9e, 0x2a, 0x73, 0xff, 0xb8, 0xc9, 0xa8, 0xdd, 0x3d, + 0xfd, 0x93, 0xbf, 0xa8, 0x34, 0x8b, 0x41, 0x2a, 0x4e, 0x4d, 0xb8, 0xe2, 0x8a, 0xed, 0x72, 0x87, + 0x6f, 0xc8, 0x3f, 0xb8, 0x66, 0x29, 0x48, 0xf6, 0x38, 0x0f, 0x1e, 0x78, 0x08, 0x57, 0x1c, 0xd8, + 0x92, 0x01, 0xbb, 0x94, 0x06, 0xba, 0xb7, 0x7e, 0x5b, 0xd5, 0xb9, 0xf7, 0x6f, 0x88, 0x46, 0x9d, + 0x29, 0x21, 0x4d, 0x1e, 0x4e, 0x1c, 0xed, 0x4c, 0xb3, 0xfd, 0x7c, 0x3b, 0xf0, 0xec, 0x76, 0xd0, + 0xfe, 0x6a, 0x16, 0xdf, 0xc6, 0xee, 0x9d, 0xdf, 0x92, 0xc0, 0x95, 0xe9, 0xa1, 0xe1, 0xff, 0x51, + 0x67, 0x7a, 0xde, 0x4c, 0xfd, 0xeb, 0xda, 0xd9, 0x5e, 0x7d, 0x4f, 0xeb, 0xc2, 0x19, 0x17, 0x95, + 0x78, 0x36, 0xcf, 0x0b, 0xec, 0xad, 0x0b, 0xec, 0x6d, 0x0a, 0xec, 0xbd, 0x58, 0x8c, 0x72, 0x8b, + 0xd1, 0xda, 0x62, 0xb4, 0xb1, 0x18, 0xbd, 0x5b, 0x8c, 0x5e, 0x3f, 0xb0, 0x77, 0x33, 0x6e, 0xfc, + 0x0d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x0f, 0xd7, 0x36, 0x32, 0x02, 0x00, 0x00, } func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto index d14d4259..fcec5535 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -33,9 +33,9 @@ message PartialObjectMetadataList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; // items contains each of the included items. - repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; + repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go index ef7e7c1e..dff735dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -24,7 +24,7 @@ package v1beta1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PartialObjectMetadataList = map[string]string{ diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 8360d842..19d823ce 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -77,6 +77,8 @@ func (ls Set) AsValidatedSelector() (Selector, error) { // perform any validation. // According to our measurements this is significantly faster // in codepaths that matter at high scale. 
+// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func (ls Set) AsSelectorPreValidated() Selector { return SelectorFromValidatedSet(ls) } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 89100438..9e22a005 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -45,6 +45,19 @@ var ( // Requirements is AND of all requirements. type Requirements []Requirement +func (r Requirements) String() string { + var sb strings.Builder + + for i, requirement := range r { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString(requirement.String()) + } + + return sb.String() +} + // Selector represents a label selector. type Selector interface { // Matches returns true if this selector matches the given set of labels. @@ -149,14 +162,12 @@ type Requirement struct { // NewRequirement is the constructor for a Requirement. // If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. -// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. -// (6) The key is invalid due to its length, or sequence -// -// of characters. See validateLabelKey for more details. +// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist. +// 2. If the operator is In or NotIn, the values set must be non-empty. +// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// 4. If the operator is Exists or DoesNotExist, the value set must be empty. +// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details. // // The empty string is a valid value in the input values set. // Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList @@ -213,22 +224,15 @@ func (r *Requirement) hasValue(value string) bool { // Matches returns true if the Requirement matches the input Labels. // There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// -// value for that key is in Requirement's value set. -// -// (3) The operator is NotIn, Labels has the Requirement's key and -// -// Labels' value for that key is not in Requirement's value set. -// -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// -// Requirement's key. -// -// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has -// -// the Requirement's key and the corresponding value satisfies mathematical inequality. +// 1. The operator is Exists and Labels has the Requirement's key. +// 2. The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// 3. 
The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// 4. The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. func (r *Requirement) Matches(ls Labels) bool { switch r.operator { case selection.In, selection.Equals, selection.DoubleEquals: @@ -294,6 +298,13 @@ func (r *Requirement) Values() sets.String { return ret } +// ValuesUnsorted returns a copy of requirement values as passed to NewRequirement without sorting. +func (r *Requirement) ValuesUnsorted() []string { + ret := make([]string, 0, len(r.strValues)) + ret = append(ret, r.strValues...) + return ret +} + // Equal checks the equality of requirement. func (r Requirement) Equal(x Requirement) bool { if r.key != x.key { @@ -872,15 +883,14 @@ func (p *Parser) parseExactValue() (sets.String, error) { // "x in (foo,,baz),y,z notin ()" // // Note: -// -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. +// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// 2. Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// 3. The empty string is a valid VALUE +// 4. A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// 5. A requirement with just !KEY requires that the KEY not exist. func Parse(selector string, opts ...field.PathOption) (Selector, error) { parsedSelector, err := parse(selector, field.ToPath(opts...)) if err == nil { @@ -948,6 +958,8 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. // A nil and empty Sets are considered equivalent to Everything(). // It assumes that Set is already validated and doesn't do any validation. +// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector +// instead, which does not copy. func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} @@ -969,3 +981,76 @@ func SelectorFromValidatedSet(ls Set) Selector { func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) { return parse(selector, field.ToPath(opts...)) } + +// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike +// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying +// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered +// equivalent to Everything(). +// +// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. 
If these +// constraints are not met, Set.AsValidatedSelector should be preferred +// +// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to +// the less optimized version. +type ValidatedSetSelector Set + +func (s ValidatedSetSelector) Matches(labels Labels) bool { + for k, v := range s { + if !labels.Has(k) || v != labels.Get(k) { + return false + } + } + return true +} + +func (s ValidatedSetSelector) Empty() bool { + return len(s) == 0 +} + +func (s ValidatedSetSelector) String() string { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + // Ensure deterministic output + sort.Strings(keys) + b := strings.Builder{} + for i, key := range keys { + v := s[key] + b.Grow(len(key) + 2 + len(v)) + if i != 0 { + b.WriteString(",") + } + b.WriteString(key) + b.WriteString("=") + b.WriteString(v) + } + return b.String() +} + +func (s ValidatedSetSelector) Add(r ...Requirement) Selector { + return s.toFullSelector().Add(r...) +} + +func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) { + return s.toFullSelector().Requirements() +} + +func (s ValidatedSetSelector) DeepCopySelector() Selector { + res := make(ValidatedSetSelector, len(s)) + for k, v := range s { + res[k] = v + } + return res +} + +func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) { + v, f := s[label] + return v, f +} + +func (s ValidatedSetSelector) toFullSelector() Selector { + return SelectorFromValidatedSet(Set(s)) +} + +var _ Selector = ValidatedSetSelector{} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 7fc513dd..73f85286 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -45,7 +45,6 @@ func NewCodec(e Encoder, d Decoder) Codec { // Encode is a convenience wrapper for encoding to a []byte from an Encoder func Encode(e Encoder, obj Object) ([]byte, error) { - // TODO: reuse buffer buf := &bytes.Buffer{} if err := e.Encode(obj, buf); err != nil { return nil, err diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 90bf487e..62eb27af 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -231,7 +231,7 @@ func (c *fromUnstructuredContext) pushKey(key string) { } -// FromUnstructuredWIthValidation converts an object from map[string]interface{} representation into a concrete type. +// FromUnstructuredWithValidation converts an object from map[string]interface{} representation into a concrete type. // It uses encoding/json/Unmarshaler if object implements it or reflection if not. // It takes a validationDirective that indicates how to behave when it encounters unknown fields. 
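As an aside on the ValidatedSetSelector type introduced in labels/selector.go above: it wraps an already-validated Set without the map copy that SelectorFromValidatedSet performs. A minimal sketch of the intended usage, assuming the label set is valid and never mutated afterwards (names here are illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/labels"
)

// matchesStable wraps an effectively immutable, pre-validated label set
// without copying it, then evaluates it against a pod's labels.
func matchesStable(podLabels map[string]string) bool {
	// The caller guarantees these labels are valid and never mutated,
	// which is the contract ValidatedSetSelector documents.
	want := labels.Set{"app": "nginx", "tier": "frontend"}
	sel := labels.ValidatedSetSelector(want)
	return sel.Matches(labels.Set(podLabels))
}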
func (c *unstructuredConverter) FromUnstructuredWithValidation(u map[string]interface{}, obj interface{}, returnUnknownFields bool) error { @@ -465,7 +465,7 @@ func sliceFromUnstructured(sv, dv reflect.Value, ctx *fromUnstructuredContext) e } dv.SetBytes(data) } else { - dv.Set(reflect.Zero(dt)) + dv.Set(reflect.MakeSlice(dt, 0, 0)) } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go index 9056397f..60c000bc 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -18,16 +18,77 @@ package runtime import ( "bytes" - "encoding/json" "errors" + "fmt" + + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" + "k8s.io/apimachinery/pkg/util/json" ) +// RawExtension intentionally avoids implementing value.UnstructuredConverter for now because the +// signature of ToUnstructured does not allow returning an error value in cases where the conversion +// is not possible (content type is unrecognized or bytes don't match content type). +func rawToUnstructured(raw []byte, contentType string) (interface{}, error) { + switch contentType { + case ContentTypeJSON: + var u interface{} + if err := json.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as JSON: %w", err) + } + return u, nil + case ContentTypeCBOR: + var u interface{} + if err := cbor.Unmarshal(raw, &u); err != nil { + return nil, fmt.Errorf("failed to parse RawExtension bytes as CBOR: %w", err) + } + return u, nil + default: + return nil, fmt.Errorf("cannot convert RawExtension with unrecognized content type to unstructured") + } +} + +func (re RawExtension) guessContentType() string { + switch { + case bytes.HasPrefix(re.Raw, cborSelfDescribed): + return ContentTypeCBOR + case len(re.Raw) > 0: + switch re.Raw[0] { + case '\t', '\r', '\n', ' ', '{', '[', 'n', 't', 'f', '"', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + // Prefixes for the four whitespace characters, objects, arrays, strings, numbers, true, false, and null. + return ContentTypeJSON + } + } + return "" +} + func (re *RawExtension) UnmarshalJSON(in []byte) error { if re == nil { return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") } - if !bytes.Equal(in, []byte("null")) { - re.Raw = append(re.Raw[0:0], in...) + if bytes.Equal(in, []byte("null")) { + return nil + } + re.Raw = append(re.Raw[0:0], in...) + return nil +} + +var ( + cborNull = []byte{0xf6} + cborSelfDescribed = []byte{0xd9, 0xd9, 0xf7} +) + +func (re *RawExtension) UnmarshalCBOR(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalCBOR on nil pointer") + } + if !bytes.Equal(in, cborNull) { + if !bytes.HasPrefix(in, cborSelfDescribed) { + // The self-described CBOR tag doesn't change the interpretation of the data + // item it encloses, but it is useful as a magic number. Its encoding is + // also what is used to implement the CBOR RecognizingDecoder. + re.Raw = append(re.Raw[:0], cborSelfDescribed...) + } + re.Raw = append(re.Raw, in...) } return nil } @@ -46,6 +107,35 @@ func (re RawExtension) MarshalJSON() ([]byte, error) { } return []byte("null"), nil } - // TODO: Check whether ContentType is actually JSON before returning it. 
- return re.Raw, nil + + contentType := re.guessContentType() + if contentType == ContentTypeJSON { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return json.Marshal(u) +} + +func (re RawExtension) MarshalCBOR() ([]byte, error) { + if re.Raw == nil { + if re.Object != nil { + return cbor.Marshal(re.Object) + } + return cbor.Marshal(nil) + } + + contentType := re.guessContentType() + if contentType == ContentTypeCBOR { + return re.Raw, nil + } + + u, err := rawToUnstructured(re.Raw, contentType) + if err != nil { + return nil, err + } + return cbor.Marshal(u) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index ec677a7d..2e40e140 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// source: k8s.io/apimachinery/pkg/runtime/generated.proto package runtime @@ -45,7 +45,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *RawExtension) Reset() { *m = RawExtension{} } func (*RawExtension) ProtoMessage() {} func (*RawExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{0} + return fileDescriptor_2e0e4b920403a48c, []int{0} } func (m *RawExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,7 +73,7 @@ var xxx_messageInfo_RawExtension proto.InternalMessageInfo func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{1} + return fileDescriptor_2e0e4b920403a48c, []int{1} } func (m *TypeMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -101,7 +101,7 @@ var xxx_messageInfo_TypeMeta proto.InternalMessageInfo func (m *Unknown) Reset() { *m = Unknown{} } func (*Unknown) ProtoMessage() {} func (*Unknown) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{2} + return fileDescriptor_2e0e4b920403a48c, []int{2} } func (m *Unknown) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -133,35 +133,34 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) -} - -var fileDescriptor_9d3c45d7f546725c = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0xaa, 0x13, 0x31, - 0x14, 0xc6, 0x27, 0xb7, 0x85, 0x7b, 0x4d, 0x0b, 0x57, 0xe2, 0xc2, 0xd1, 0x45, 0xe6, 0xd2, 0x95, - 0x77, 0x61, 0x02, 0x17, 0x04, 0xb7, 0x9d, 0x52, 0x50, 0x44, 0x90, 0xe0, 0x1f, 0x70, 0x65, 0x3a, - 0x13, 0xa7, 0x61, 0xe8, 0xc9, 0x90, 0x66, 0x1c, 0xbb, 0xf3, 0x11, 0x7c, 0xac, 0x2e, 0xbb, 0xec, - 0xaa, 0xd8, 0xf1, 0x21, 0xdc, 0x4a, 0xd3, 0xb4, 0x56, 0x5d, 0x74, 0x97, 0x73, 0xbe, 0xef, 0xf7, - 0x9d, 0x73, 0x20, 0xf8, 0x45, 0xf9, 0x7c, 0xce, 0xb4, 0xe1, 0x65, 0x3d, 0x51, 0x16, 0x94, 0x53, - 0x73, 0xfe, 0x45, 0x41, 0x6e, 0x2c, 0x0f, 0x82, 0xac, 0xf4, 0x4c, 0x66, 0x53, 0x0d, 0xca, 0x2e, - 0x78, 0x55, 0x16, 0xdc, 0xd6, 0xe0, 0xf4, 0x4c, 0xf1, 0x42, 0x81, 0xb2, 0xd2, 0xa9, 0x9c, 0x55, - 0xd6, 0x38, 0x43, 0x92, 0x3d, 0xc0, 0x4e, 0x01, 0x56, 0x95, 0x05, 0x0b, 0xc0, 0xe3, 0xa7, 0x85, - 0x76, 0xd3, 0x7a, 
0xc2, 0x32, 0x33, 0xe3, 0x85, 0x29, 0x0c, 0xf7, 0xdc, 0xa4, 0xfe, 0xec, 0x2b, - 0x5f, 0xf8, 0xd7, 0x3e, 0x6f, 0x70, 0x8b, 0xfb, 0x42, 0x36, 0xe3, 0xaf, 0x4e, 0xc1, 0x5c, 0x1b, - 0x20, 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0x37, 0xe8, 0x49, 0x3f, 0xbd, 0x6c, 0x37, 0x49, 0x47, - 0xc8, 0x46, 0xec, 0x7a, 0x83, 0x4f, 0xf8, 0xea, 0xed, 0xa2, 0x52, 0xaf, 0x95, 0x93, 0xe4, 0x0e, - 0x63, 0x59, 0xe9, 0xf7, 0xca, 0xee, 0x20, 0xef, 0xbe, 0x97, 0x92, 0xe5, 0x26, 0x89, 0xda, 0x4d, - 0x82, 0x87, 0x6f, 0x5e, 0x06, 0x45, 0x9c, 0xb8, 0xc8, 0x0d, 0xee, 0x96, 0x1a, 0xf2, 0xf8, 0xc2, - 0xbb, 0xfb, 0xc1, 0xdd, 0x7d, 0xa5, 0x21, 0x17, 0x5e, 0x19, 0xfc, 0x42, 0xf8, 0xf2, 0x1d, 0x94, - 0x60, 0x1a, 0x20, 0x1f, 0xf0, 0x95, 0x0b, 0xd3, 0x7c, 0x7e, 0xef, 0xee, 0x96, 0x9d, 0xb9, 0x9d, - 0x1d, 0xd6, 0x4b, 0xef, 0x87, 0xf0, 0xe3, 0xc2, 0xe2, 0x18, 0x76, 0xb8, 0xf0, 0xe2, 0xff, 0x0b, - 0xc9, 0x10, 0x5f, 0x67, 0x06, 0x9c, 0x02, 0x37, 0x86, 0xcc, 0xe4, 0x1a, 0x8a, 0xb8, 0xe3, 0x97, - 0x7d, 0x18, 0xf2, 0xae, 0x47, 0x7f, 0xcb, 0xe2, 0x5f, 0x3f, 0x79, 0x86, 0x7b, 0xa1, 0xb5, 0x1b, - 0x1d, 0x77, 0x3d, 0xfe, 0x20, 0xe0, 0xbd, 0xd1, 0x1f, 0x49, 0x9c, 0xfa, 0xd2, 0xf1, 0x72, 0x4b, - 0xa3, 0xd5, 0x96, 0x46, 0xeb, 0x2d, 0x8d, 0xbe, 0xb5, 0x14, 0x2d, 0x5b, 0x8a, 0x56, 0x2d, 0x45, - 0xeb, 0x96, 0xa2, 0x1f, 0x2d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0x63, 0x72, 0xe6, 0xb7, 0xfc, 0x0e, - 0x00, 0x00, 0xff, 0xff, 0x1f, 0x32, 0xd5, 0x68, 0x68, 0x02, 0x00, 0x00, + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_2e0e4b920403a48c) +} + +var fileDescriptor_2e0e4b920403a48c = []byte{ + // 365 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x6b, 0x22, 0x31, + 0x18, 0xc6, 0x27, 0x2a, 0xe8, 0x46, 0xc1, 0x25, 0x7b, 0xd8, 0xd9, 0x3d, 0x64, 0xc4, 0xd3, 0x7a, + 0xd8, 0x0c, 0x08, 0x85, 0x5e, 0x1d, 0xf1, 0x50, 0x4a, 0xa1, 0x84, 0xfe, 0x81, 0x9e, 0x1a, 0x67, + 0xd2, 0x31, 0x0c, 0x26, 0xc3, 0x18, 0x99, 0x7a, 0xeb, 0x47, 0xe8, 0xc7, 0xf2, 0xe8, 0xd1, 0x93, + 0xd4, 0xe9, 0x87, 0xe8, 0xb5, 0x18, 0xa3, 0xb5, 0xed, 0xc1, 0x5b, 0xde, 0xf7, 0x79, 0x7e, 0xcf, + 0xfb, 0xbe, 0x10, 0xe8, 0x27, 0xa7, 0x13, 0x22, 0x94, 0xcf, 0x52, 0x31, 0x66, 0xe1, 0x48, 0x48, + 0x9e, 0xcd, 0xfc, 0x34, 0x89, 0xfd, 0x6c, 0x2a, 0xb5, 0x18, 0x73, 0x3f, 0xe6, 0x92, 0x67, 0x4c, + 0xf3, 0x88, 0xa4, 0x99, 0xd2, 0x0a, 0x79, 0x5b, 0x80, 0x1c, 0x02, 0x24, 0x4d, 0x62, 0x62, 0x81, + 0xbf, 0xff, 0x63, 0xa1, 0x47, 0xd3, 0x21, 0x09, 0xd5, 0xd8, 0x8f, 0x55, 0xac, 0x7c, 0xc3, 0x0d, + 0xa7, 0x0f, 0xa6, 0x32, 0x85, 0x79, 0x6d, 0xf3, 0xda, 0x1d, 0xd8, 0xa0, 0x2c, 0x1f, 0x3c, 0x6a, + 0x2e, 0x27, 0x42, 0x49, 0xf4, 0x07, 0x96, 0x33, 0x96, 0xbb, 0xa0, 0x05, 0xfe, 0x35, 0x82, 0x6a, + 0xb1, 0xf2, 0xca, 0x94, 0xe5, 0x74, 0xd3, 0x6b, 0xdf, 0xc3, 0xda, 0xd5, 0x2c, 0xe5, 0x17, 0x5c, + 0x33, 0xd4, 0x85, 0x90, 0xa5, 0xe2, 0x86, 0x67, 0x1b, 0xc8, 0xb8, 0x7f, 0x04, 0x68, 0xbe, 0xf2, + 0x9c, 0x62, 0xe5, 0xc1, 0xde, 0xe5, 0x99, 0x55, 0xe8, 0x81, 0x0b, 0xb5, 0x60, 0x25, 0x11, 0x32, + 0x72, 0x4b, 0xc6, 0xdd, 0xb0, 0xee, 0xca, 0xb9, 0x90, 0x11, 0x35, 0x4a, 0xfb, 0x0d, 0xc0, 0xea, + 0xb5, 0x4c, 0xa4, 0xca, 0x25, 0xba, 0x85, 0x35, 0x6d, 0xa7, 0x99, 0xfc, 0x7a, 0xb7, 0x43, 0x8e, + 0xdc, 0x4e, 0x76, 0xeb, 0x05, 0x3f, 0x6d, 0xf8, 0x7e, 0x61, 0xba, 0x0f, 0xdb, 0x5d, 0x58, 0xfa, + 0x7e, 0x21, 0xea, 0xc1, 0x66, 0xa8, 0xa4, 0xe6, 0x52, 0x0f, 0x64, 0xa8, 0x22, 0x21, 0x63, 0xb7, + 0x6c, 0x96, 0xfd, 0x6d, 0xf3, 0x9a, 0xfd, 0xcf, 0x32, 0xfd, 0xea, 0x47, 0x27, 0xb0, 0x6e, 0x5b, + 0x9b, 0xd1, 0x6e, 0xc5, 0xe0, 0xbf, 0x2c, 0x5e, 0xef, 0x7f, 
0x48, 0xf4, 0xd0, 0x17, 0x0c, 0xe6, + 0x6b, 0xec, 0x2c, 0xd6, 0xd8, 0x59, 0xae, 0xb1, 0xf3, 0x54, 0x60, 0x30, 0x2f, 0x30, 0x58, 0x14, + 0x18, 0x2c, 0x0b, 0x0c, 0x5e, 0x0a, 0x0c, 0x9e, 0x5f, 0xb1, 0x73, 0xe7, 0x1d, 0xf9, 0x2d, 0xef, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x9b, 0x09, 0xb3, 0x4f, 0x02, 0x00, 0x00, } func (m *RawExtension) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 7bd1a3a6..cc0a77bb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -236,10 +236,14 @@ func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { gvk = preferredGVK } } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err + + // The gvk only needs to be set if not already as desired. + if gvk != oldGVK { + kind.SetGroupVersionKind(gvk) + defer kind.SetGroupVersionKind(oldGVK) + } + + return e.Encoder.Encode(obj, stream) } // WithoutVersionDecoder clears the group version kind of a deserialized object. @@ -257,3 +261,26 @@ func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersion } return obj, gvk, err } + +type encoderWithAllocator struct { + encoder EncoderWithAllocator + memAllocator MemoryAllocator +} + +// NewEncoderWithAllocator returns a new encoder +func NewEncoderWithAllocator(e EncoderWithAllocator, a MemoryAllocator) Encoder { + return &encoderWithAllocator{ + encoder: e, + memAllocator: a, + } +} + +// Encode writes the provided object to the nested writer +func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { + return e.encoder.EncodeWithAllocator(obj, w, e.memAllocator) +} + +// Identifier returns identifier of this encoder. +func (e *encoderWithAllocator) Identifier() Identifier { + return e.encoder.Identifier() +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 710a9779..e89ea893 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -365,4 +365,9 @@ type Unstructured interface { // error should terminate the iteration. If IsList() returns false, this method should return an error // instead of calling the provided function. EachListItem(func(Object) error) error + // EachListItemWithAlloc works like EachListItem, but avoids retaining references to a slice of items. + // It does this by making a shallow copy of non-pointer items before passing them to fn. + // + // If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency. + EachListItemWithAlloc(func(Object) error) error } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 46b1e787..7a26d279 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
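A note on the EachListItemWithAlloc methods added above: they matter when the callback retains the objects it is handed, because each item is wrapped in a freshly allocated Unstructured, so keeping a pointer does not pin the whole backing slice. A hedged sketch of that usage, assuming a populated *unstructured.UnstructuredList; if the items were only read and dropped, plain EachListItem would be the cheaper choice.

package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// collectItems retains per-item pointers, so it uses EachListItemWithAlloc.
func collectItems(list *unstructured.UnstructuredList) ([]*unstructured.Unstructured, error) {
	var kept []*unstructured.Unstructured
	err := list.EachListItemWithAlloc(func(obj runtime.Object) error {
		u, ok := obj.(*unstructured.Unstructured)
		if !ok {
			return nil
		}
		kept = append(kept, u) // safe: each u is a shallow copy allocated per call
		return nil
	})
	return kept, err
}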
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// source: k8s.io/apimachinery/pkg/runtime/schema/generated.proto package schema @@ -39,21 +39,20 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) + proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_25f8f0eed21c6089) } -var fileDescriptor_0462724132518e0d = []byte{ - // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xad, 0x8e, 0xc3, 0x30, - 0x0c, 0xc0, 0xf1, 0x84, 0x1e, 0x3c, 0x78, 0xc0, 0xb0, 0xec, 0x62, 0x7a, 0xf8, 0xf0, 0xa4, 0xf1, - 0xb1, 0xb4, 0xf5, 0xd2, 0x28, 0xca, 0x87, 0xd2, 0x64, 0xd2, 0xd8, 0x1e, 0x61, 0x8f, 0x55, 0x58, - 0x58, 0xb8, 0x66, 0x2f, 0x32, 0x29, 0x2d, 0x18, 0x1c, 0xf3, 0x5f, 0xd6, 0xcf, 0xf2, 0xd7, 0xd1, - 0xfc, 0x8d, 0x42, 0x7b, 0x34, 0xb9, 0xa5, 0xe8, 0x28, 0xd1, 0x88, 0x17, 0x72, 0xbd, 0x8f, 0xb8, - 0x2f, 0x64, 0xd0, 0x56, 0x76, 0x83, 0x76, 0x14, 0xaf, 0x18, 0x8c, 0xc2, 0x98, 0x5d, 0xd2, 0x96, - 0x70, 0xec, 0x06, 0xb2, 0x12, 0x15, 0x39, 0x8a, 0x32, 0x51, 0x2f, 0x42, 0xf4, 0xc9, 0x7f, 0x37, - 0x9b, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0xc4, 0xe6, 0x7e, 0x7e, 0x95, 0x4e, 0x43, 0x6e, - 0x45, 0xe7, 0x2d, 0x2a, 0xaf, 0x3c, 0x56, 0xde, 0xe6, 0x73, 0xad, 0x1a, 0x75, 0xda, 0xce, 0xfe, - 0x1f, 0xa6, 0x15, 0xd8, 0xbc, 0x02, 0x5b, 0x56, 0x60, 0xb7, 0x02, 0x7c, 0x2a, 0xc0, 0xe7, 0x02, - 0x7c, 0x29, 0xc0, 0x1f, 0x05, 0xf8, 0xfd, 0x09, 0xec, 0xd4, 0x7c, 0xf6, 0xf4, 0x2b, 0x00, 0x00, - 0xff, 0xff, 0x12, 0xb4, 0xae, 0x48, 0xf6, 0x00, 0x00, 0x00, +var fileDescriptor_25f8f0eed21c6089 = []byte{ + // 170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xa1, 0x0e, 0xc2, 0x30, + 0x10, 0xc6, 0xf1, 0xd6, 0x22, 0x91, 0x88, 0x93, 0x73, 0xdc, 0x39, 0x82, 0x46, 0xf3, 0x04, 0xb8, + 0x6e, 0x94, 0xae, 0x59, 0xba, 0x6b, 0xba, 0x4e, 0xe0, 0x78, 0x04, 0x1e, 0x6b, 0x72, 0x72, 0x92, + 0x95, 0x17, 0x21, 0x69, 0x11, 0x48, 0xdc, 0xfd, 0xc5, 0xef, 0xf2, 0x6d, 0x0e, 0xdd, 0x71, 0x40, + 0xcb, 0xa4, 0xbc, 0x75, 0xaa, 0x69, 0x6d, 0xaf, 0xc3, 0x9d, 0x7c, 0x67, 0x28, 0x8c, 0x7d, 0xb4, + 0x4e, 0xd3, 0xd0, 0xb4, 0xda, 0x29, 0x32, 0xba, 0xd7, 0x41, 0x45, 0x7d, 0x45, 0x1f, 0x38, 0xf2, + 0xb6, 0x2a, 0x0e, 0x7f, 0x1d, 0xfa, 0xce, 0xe0, 0xd7, 0x61, 0x71, 0xbb, 0xbd, 0xb1, 0xb1, 0x1d, + 0x6b, 0x6c, 0xd8, 0x91, 0x61, 0xc3, 0x94, 0x79, 0x3d, 0xde, 0x72, 0xe5, 0xc8, 0x57, 0x79, 0x7b, + 0x3a, 0x4f, 0x2b, 0x88, 0x79, 0x05, 0xb1, 0xac, 0x20, 0x1e, 0x09, 0xe4, 0x94, 0x40, 0xce, 0x09, + 0xe4, 0x92, 0x40, 0xbe, 0x12, 0xc8, 0xe7, 0x1b, 0xc4, 0xa5, 0xfa, 0x6f, 0xf4, 0x27, 0x00, 0x00, + 0xff, 0xff, 0x97, 0xb8, 0x4d, 0x1f, 0xdd, 0x00, 0x00, 0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index b21eb664..d1c37c94 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -39,7 +39,7 @@ func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { // ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com` // and parses it out into both possibilities. 
This code takes no responsibility for knowing which representation was intended // but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then -// `*GroupVersionResource` is nil. +// `*GroupVersionKind` is nil. // `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind` func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) { var gvk *GroupVersionKind @@ -191,8 +191,7 @@ func (gv GroupVersion) Identifier() string { // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { for _, gvk := range kinds { if gvk.Group == gv.Group && gvk.Version == gv.Version { @@ -240,8 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // GroupVersions can be used to represent a set of desired group versions. // TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. // TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) -// -// in fewer places. +// in fewer places. type GroupVersions []GroupVersion // Identifier implements runtime.GroupVersioner interface. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 18b25a99..a5b11671 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -118,8 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. // // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// -// every version with particular schemas. Resolve this method at that point. +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { s.addObservedVersion(version) s.AddKnownTypes(version, types...) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go new file mode 100644 index 00000000..cd78b1df --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package direct provides functions for marshaling and unmarshaling between arbitrary Go values and +// CBOR data, with behavior that is compatible with that of the CBOR serializer. 
In particular, +// types that implement cbor.Marshaler and cbor.Unmarshaler should use these functions. +package direct + +import ( + "k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes" +) + +func Marshal(src interface{}) ([]byte, error) { + return modes.Encode.Marshal(src) +} + +func Unmarshal(src []byte, dst interface{}) error { + return modes.Decode.Unmarshal(src, dst) +} + +func Diagnose(src []byte) (string, error) { + return modes.Diagnostic.Diagnose(src) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go new file mode 100644 index 00000000..f14cbd6b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "bytes" + "sync" +) + +var buffers = BufferProvider{p: new(sync.Pool)} + +type buffer struct { + bytes.Buffer +} + +type pool interface { + Get() interface{} + Put(interface{}) +} + +type BufferProvider struct { + p pool +} + +func (b *BufferProvider) Get() *buffer { + if buf, ok := b.p.Get().(*buffer); ok { + return buf + } + return &buffer{} +} + +func (b *BufferProvider) Put(buf *buffer) { + if buf.Cap() > 3*1024*1024 /* Default MaxRequestBodyBytes */ { + // Objects in a sync.Pool are assumed to be fungible. This is not a good assumption + // for pools of *bytes.Buffer because a *bytes.Buffer's underlying array grows as + // needed to accommodate writes. In Kubernetes, apiservers tend to encode "small" + // objects very frequently and much larger objects (especially large lists) only + // occasionally. Under steady load, pooled buffers tend to be borrowed frequently + // enough to prevent them from being released. Over time, each buffer is used to + // encode a large object and its capacity increases accordingly. The result is that + // practically all buffers in the pool retain much more capacity than needed to + // encode most objects. + + // As a basic mitigation for the worst case, buffers with more capacity than the + // default max request body size are never returned to the pool. + // TODO: Optimize for higher buffer utilization. + return + } + buf.Reset() + b.p.Put(buf) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go new file mode 100644 index 00000000..858529e9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -0,0 +1,422 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "encoding" + "encoding/json" + "errors" + "fmt" + "reflect" + "sync" + + "github.com/fxamacker/cbor/v2" +) + +// Returns a non-nil error if and only if the argument's type (or one of its component types, for +// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing +// cbor.Marshaler and likewise for the respective Unmarshaler interfaces. +// +// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic +// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be +// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree +// types in client programs. +func RejectCustomMarshalers(v interface{}) error { + if v == nil { + return nil + } + rv := reflect.ValueOf(v) + if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil { + return fmt.Errorf("unable to serialize %T: %w", v, err) + } + return nil +} + +// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the +// decoder shouldn't be able to contain cycles, but practically any object can be passed to the +// encoder. +var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)") + +// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing +// that it can might deeply nested acyclic objects. The limit will be removed along with the rest of +// this mechanism. +const maxDepth = 2048 + +var marshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Marshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Marshaler](), + reflect.TypeFor[encoding.TextMarshaler](), + }, +} + +var unmarshalerCache = checkers{ + cborInterface: reflect.TypeFor[cbor.Unmarshaler](), + nonCBORInterfaces: []reflect.Type{ + reflect.TypeFor[json.Unmarshaler](), + reflect.TypeFor[encoding.TextUnmarshaler](), + }, + assumeAddressableValues: true, +} + +// checker wraps a function for dynamically checking a value of a specific type for custom JSON +// behaviors not matched by a custom CBOR behavior. +type checker struct { + // check returns a non-nil error if the given value might be marshalled to or from CBOR + // using the default behavior for its kind, but marshalled to or from JSON using custom + // behavior. + check func(rv reflect.Value, depth int) error + + // safe returns true if all values of this type are safe from mismatched custom marshalers. + safe func() bool +} + +// TODO: stale +// Having a single addressable checker for comparisons lets us prune and collapse parts of the +// object traversal that are statically known to be safe. Depending on the type, it may be +// unnecessary to inspect each value of that type. For example, no value of the built-in type bool +// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a +// distinct type from bool). 
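
A brief editorial illustration of the pruning argument above (the noop checker declared just below relies on it): custom JSON behavior attaches to named types, never to the built-ins themselves. The FancyBool type here is hypothetical and not part of the vendored code.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// FancyBool is a named type whose underlying type is bool; unlike the
// built-in bool, it can carry a custom JSON representation.
type FancyBool bool

func (b FancyBool) MarshalJSON() ([]byte, error) {
	if b {
		return []byte(`"yes"`), nil
	}
	return []byte(`"no"`), nil
}

func main() {
	jm := reflect.TypeFor[json.Marshaler]()
	fmt.Println(reflect.TypeFor[bool]().Implements(jm))      // false: plain bool is always safe to prune
	fmt.Println(reflect.TypeFor[FancyBool]().Implements(jm)) // true: would need a cbor.Marshaler counterpart
}
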
+var noop = checker{ + safe: func() bool { + return true + }, + check: func(rv reflect.Value, depth int) error { + return nil + }, +} + +type checkers struct { + m sync.Map // reflect.Type => *checker + + cborInterface reflect.Type + nonCBORInterfaces []reflect.Type + + assumeAddressableValues bool +} + +func (cache *checkers) getChecker(rt reflect.Type) checker { + if ptr, ok := cache.m.Load(rt); ok { + return *ptr.(*checker) + } + + return cache.getCheckerInternal(rt, nil) +} + +// linked list node representing the path from a composite type to an element type +type path struct { + Type reflect.Type + Parent *path +} + +func (p path) cyclic(rt reflect.Type) bool { + for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent { + if ancestor.Type == rt { + return true + } + } + return false +} + +func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) { + // Store a placeholder cache entry first to handle cyclic types. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Done() + c = checker{ + safe: func() bool { + wg.Wait() + return c.safe() + }, + check: func(rv reflect.Value, depth int) error { + wg.Wait() + return c.check(rv, depth) + }, + } + if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + // Someone else stored an entry for this type, use it. + return *actual.(*checker) + } + + // Take a nonreflective path for the unstructured container types. They're common and + // usually nested inside one another. + switch rt { + case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}](): + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + return checkUnstructuredValue(cache, rv.Interface(), depth) + }, + } + } + + // It's possible that one of the relevant interfaces is implemented on a type with a pointer + // receiver, but that a particular value of that type is not addressable. For example: + // + // func (Foo) MarshalText() ([]byte, error) { ... } + // func (*Foo) MarshalCBOR() ([]byte, error) { ... } + // + // Both methods are in the method set of *Foo, but the method set of Foo contains only + // MarshalText. + // + // Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text + // interface with a pointer receiver are always accessible. Only the unmarshaler check + // assumes that CBOR methods with pointer receivers are accessible. 
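
The method-set caveat above, made concrete with the Foo type from the comment (illustrative only, not part of the vendored change): only *Foo satisfies cbor.Marshaler, which is why only the unmarshaler check, whose destinations are always addressable, also consults the pointer type.

package main

import (
	"encoding"
	"fmt"
	"reflect"

	"github.com/fxamacker/cbor/v2"
)

type Foo struct{}

func (Foo) MarshalText() ([]byte, error)  { return []byte("foo"), nil }               // value receiver
func (*Foo) MarshalCBOR() ([]byte, error) { return []byte{0x63, 'f', 'o', 'o'}, nil } // pointer receiver

func main() {
	rt := reflect.TypeFor[Foo]()
	fmt.Println(rt.Implements(reflect.TypeFor[encoding.TextMarshaler]()))            // true
	fmt.Println(rt.Implements(reflect.TypeFor[cbor.Marshaler]()))                    // false: MarshalCBOR needs *Foo
	fmt.Println(reflect.PointerTo(rt).Implements(reflect.TypeFor[cbor.Marshaler]())) // true
}
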
+ + if rt.Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if rt.Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) { + return noop + } + for _, unsafe := range cache.nonCBORInterfaces { + if reflect.PointerTo(rt).Implements(unsafe) { + err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe) + return checker{ + safe: func() bool { + return false + }, + check: func(reflect.Value, int) error { + return err + }, + } + } + } + + self := &path{Type: rt, Parent: parent} + + switch rt.Kind() { + case reflect.Array: + ce := cache.getCheckerInternal(rt.Elem(), self) + rtlen := rt.Len() + if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rtlen; i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Interface: + // All interface values have to be checked because their dynamic type might + // implement one of the interesting interfaces or be composed of another type that + // does. + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + // Unpacking interfaces must count against recursion depth, + // consider this cycle: + // > var i interface{} + // > var p *interface{} = &i + // > i = p + // > rv := reflect.ValueOf(i) + // > for { + // > rv = rv.Elem() + // > } + if depth <= 0 { + return errMaxDepthExceeded + } + rv = rv.Elem() + return cache.getChecker(rv.Type()).check(rv, depth-1) + }, + } + + case reflect.Map: + rtk := rt.Key() + ck := cache.getCheckerInternal(rtk, self) + rte := rt.Elem() + ce := cache.getCheckerInternal(rte, self) + if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + iter := rv.MapRange() + rvk := reflect.New(rtk).Elem() + rve := reflect.New(rte).Elem() + for iter.Next() { + rvk.SetIterKey(iter) + if err := ck.check(rvk, depth-1); err != nil { + return err + } + rve.SetIterValue(iter) + if err := ce.check(rve, depth-1); err != nil { + return err + } + } + return nil + }, + } + + case reflect.Pointer: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if rv.IsNil() { + return nil + } + if depth <= 0 { + return errMaxDepthExceeded + } + return ce.check(rv.Elem(), depth-1) + }, + } + + case reflect.Slice: + ce := cache.getCheckerInternal(rt.Elem(), self) + if !self.cyclic(rt.Elem()) && ce.safe() { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for i := 0; i < rv.Len(); i++ { + if err := ce.check(rv.Index(i), depth-1); err != nil { + return 
err + } + } + return nil + }, + } + + case reflect.Struct: + type field struct { + Index int + Checker checker + } + var fields []field + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + cf := cache.getCheckerInternal(f.Type, self) + if !self.cyclic(f.Type) && cf.safe() { + continue + } + fields = append(fields, field{Index: i, Checker: cf}) + } + if len(fields) == 0 { + return noop + } + return checker{ + safe: func() bool { + return false + }, + check: func(rv reflect.Value, depth int) error { + if depth <= 0 { + return errMaxDepthExceeded + } + for _, fi := range fields { + if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil { + return err + } + } + return nil + }, + } + + default: + // Not a serializable composite type (funcs and channels are composite types but are + // rejected by JSON and CBOR serialization). + return noop + + } +} + +func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error { + switch v := v.(type) { + case nil, bool, int64, float64, string: + return nil + case []interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + case map[string]interface{}: + if depth <= 0 { + return errMaxDepthExceeded + } + for _, element := range v { + if err := checkUnstructuredValue(cache, element, depth-1); err != nil { + return err + } + } + return nil + default: + // Unmarshaling an unstructured doesn't use other dynamic types, but nothing + // prevents inserting values with arbitrary dynamic types into unstructured content, + // as long as they can be marshalled. + rv := reflect.ValueOf(v) + return cache.getChecker(rv.Type()).check(rv, depth) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go new file mode 100644 index 00000000..895b0def --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go @@ -0,0 +1,158 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "reflect" + + "github.com/fxamacker/cbor/v2" +) + +var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry { + var opts []func(*cbor.SimpleValueRegistry) error + for sv := 0; sv <= 255; sv++ { + // Reject simple values 0-19, 23, and 32-255. The simple values 24-31 are reserved + // and considered ill-formed by the CBOR specification. We only accept false (20), + // true (21), and null (22). + switch sv { + case 20: // false + case 21: // true + case 22: // null + case 24, 25, 26, 27, 28, 29, 30, 31: // reserved + default: + opts = append(opts, cbor.WithRejectedSimpleValue(cbor.SimpleValue(sv))) + } + } + simpleValues, err := cbor.NewSimpleValueRegistryFromDefaults(opts...) 
+ if err != nil { + panic(err) + } + return simpleValues +}() + +var Decode cbor.DecMode = func() cbor.DecMode { + decode, err := cbor.DecOptions{ + // Maps with duplicate keys are well-formed but invalid according to the CBOR spec + // and never acceptable. Unlike the JSON serializer, inputs containing duplicate map + // keys are rejected outright and not surfaced as a strict decoding error. + DupMapKey: cbor.DupMapKeyEnforcedAPF, + + // For JSON parity, decoding an RFC3339 string into time.Time needs to be accepted + // with or without tagging. If a tag number is present, it must be valid. + TimeTag: cbor.DecTagOptional, + + // Observed depth up to 16 in fuzzed batch/v1 CronJobList. JSON implementation limit + // is 10000. + MaxNestedLevels: 64, + + MaxArrayElements: 1024, + MaxMapPairs: 1024, + + // Indefinite-length sequences aren't produced by this serializer, but other + // implementations can. + IndefLength: cbor.IndefLengthAllowed, + + // Accept inputs that contain CBOR tags. + TagsMd: cbor.TagsAllowed, + + // Decode type 0 (unsigned integer) as int64. + // TODO: IntDecConvertSignedOrFail errors on overflow, JSON will try to fall back to float64. + IntDec: cbor.IntDecConvertSignedOrFail, + + // Disable producing map[cbor.ByteString]interface{}, which is not acceptable for + // decodes into interface{}. + MapKeyByteString: cbor.MapKeyByteStringForbidden, + + // Error on map keys that don't map to a field in the destination struct. + ExtraReturnErrors: cbor.ExtraDecErrorUnknownField, + + // Decode maps into concrete type map[string]interface{} when the destination is an + // interface{}. + DefaultMapType: reflect.TypeOf(map[string]interface{}(nil)), + + // A CBOR text string whose content is not a valid UTF-8 sequence is well-formed but + // invalid according to the CBOR spec. Reject invalid inputs. Encoders are + // responsible for ensuring that all text strings they produce contain valid UTF-8 + // sequences and may use the byte string major type to encode strings that have not + // been validated. + UTF8: cbor.UTF8RejectInvalid, + + // Never make a case-insensitive match between a map key and a struct field. + FieldNameMatching: cbor.FieldNameMatchingCaseSensitive, + + // Produce string concrete values when decoding a CBOR byte string into interface{}. + DefaultByteStringType: reflect.TypeOf(""), + + // Allow CBOR byte strings to be decoded into string destination values. If a byte + // string is enclosed in an "expected later encoding" tag + // (https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), then the text + // encoding indicated by that tag (e.g. base64) will be applied to the contents of + // the byte string. + ByteStringToString: cbor.ByteStringToStringAllowedWithExpectedLaterEncoding, + + // Allow CBOR byte strings to match struct fields when appearing as a map key. + FieldNameByteString: cbor.FieldNameByteStringAllowed, + + // When decoding an unrecognized tag to interface{}, return the decoded tag content + // instead of the default, a cbor.Tag representing a (number, content) pair. + UnrecognizedTagToAny: cbor.UnrecognizedTagContentToAny, + + // Decode time tags to interface{} as strings containing RFC 3339 timestamps. + TimeTagToAny: cbor.TimeTagToRFC3339Nano, + + // For parity with JSON, strings can be decoded into time.Time if they are RFC 3339 + // timestamps. + ByteStringToTime: cbor.ByteStringToTimeAllowed, + + // Reject NaN and infinite floating-point values since they don't have a JSON + // representation (RFC 8259 Section 6). 
+ NaN: cbor.NaNDecodeForbidden, + Inf: cbor.InfDecodeForbidden, + + // When unmarshaling a byte string into a []byte, assume that the byte string + // contains base64-encoded bytes, unless explicitly counterindicated by an "expected + // later encoding" tag. This is consistent with the because of unmarshaling a JSON + // text into a []byte. + ByteStringExpectedFormat: cbor.ByteStringExpectedBase64, + + // Reject the arbitrary-precision integer tags because they can't be faithfully + // roundtripped through the allowable Unstructured types. + BignumTag: cbor.BignumTagForbidden, + + // Reject anything other than the simple values true, false, and null. + SimpleValues: simpleValues, + + // Disable default recognition of types implementing encoding.BinaryUnmarshaler, + // which is not recognized for JSON decoding. + BinaryUnmarshaler: cbor.BinaryUnmarshalerNone, + }.DecMode() + if err != nil { + panic(err) + } + return decode +}() + +// DecodeLax is derived from Decode, but does not complain about unknown fields in the input. +var DecodeLax cbor.DecMode = func() cbor.DecMode { + opts := Decode.DecOptions() + opts.ExtraReturnErrors &^= cbor.ExtraDecErrorUnknownField // clear bit + dm, err := opts.DecMode() + if err != nil { + panic(err) + } + return dm +}() diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go similarity index 56% rename from vendor/k8s.io/gengo/examples/set-gen/generators/tags.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go index 52e87677..61f3f145 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/generators/tags.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2024 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,20 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -package generators +package modes import ( - "k8s.io/gengo/types" - "k8s.io/klog/v2" + "github.com/fxamacker/cbor/v2" ) -// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if -// it exists, the value is boolean. If the tag did not exist, it returns -// false. -func extractBoolTagOrDie(key string, lines []string) bool { - val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) +var Diagnostic cbor.DiagMode = func() cbor.DiagMode { + opts := Decode.DecOptions() + diagnostic, err := cbor.DiagOptions{ + ByteStringText: true, + + MaxNestedLevels: opts.MaxNestedLevels, + MaxArrayElements: opts.MaxArrayElements, + MaxMapPairs: opts.MaxMapPairs, + }.DiagMode() if err != nil { - klog.Fatalf(err.Error()) + panic(err) } - return val -} + return diagnostic +}() diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go new file mode 100644 index 00000000..c6693138 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go @@ -0,0 +1,155 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modes + +import ( + "io" + + "github.com/fxamacker/cbor/v2" +) + +var Encode = EncMode{ + delegate: func() cbor.UserBufferEncMode { + encode, err := cbor.EncOptions{ + // Map keys need to be sorted to have deterministic output, and this is the order + // defined in RFC 8949 4.2.1 "Core Deterministic Encoding Requirements". + Sort: cbor.SortBytewiseLexical, + + // CBOR supports distinct types for IEEE-754 float16, float32, and float64. Store + // floats in the smallest width that preserves value so that equivalent float32 and + // float64 values encode to identical bytes, as they do in a JSON + // encoding. Satisfies one of the "Core Deterministic Encoding Requirements". + ShortestFloat: cbor.ShortestFloat16, + + // Error on attempt to encode NaN and infinite values. This is what the JSON + // serializer does. + NaNConvert: cbor.NaNConvertReject, + InfConvert: cbor.InfConvertReject, + + // Error on attempt to encode math/big.Int values, which can't be faithfully + // roundtripped through Unstructured in general (the dynamic numeric types allowed + // in Unstructured are limited to float64 and int64). + BigIntConvert: cbor.BigIntConvertReject, + + // MarshalJSON for time.Time writes RFC3339 with nanos. + Time: cbor.TimeRFC3339Nano, + + // The decoder must be able to accept RFC3339 strings with or without tag 0 (e.g. by + // the end of time.Time -> JSON -> Unstructured -> CBOR, the CBOR encoder has no + // reliable way of knowing that a particular string originated from serializing a + // time.Time), so producing tag 0 has little use. + TimeTag: cbor.EncTagNone, + + // Indefinite-length items have multiple encodings and aren't being used anyway, so + // disable to avoid an opportunity for nondeterminism. + IndefLength: cbor.IndefLengthForbidden, + + // Preserve distinction between nil and empty for slices and maps. + NilContainers: cbor.NilContainerAsNull, + + // OK to produce tags. + TagsMd: cbor.TagsAllowed, + + // Use the same definition of "empty" as encoding/json. + OmitEmpty: cbor.OmitEmptyGoValue, + + // The CBOR types text string and byte string are structurally equivalent, with the + // semantic difference that a text string whose content is an invalid UTF-8 sequence + // is itself invalid. We reject all invalid text strings at decode time and do not + // validate or sanitize all Go strings at encode time. Encoding Go strings to the + // byte string type is comparable to the existing Protobuf behavior and cheaply + // ensures that the output is valid CBOR. + String: cbor.StringToByteString, + + // Encode struct field names to the byte string type rather than the text string + // type. + FieldName: cbor.FieldNameToByteString, + + // Marshal Go byte arrays to CBOR arrays of integers (as in JSON) instead of byte + // strings. + ByteArray: cbor.ByteArrayToArray, + + // Marshal []byte to CBOR byte string enclosed in tag 22 (expected later base64 + // encoding, https://www.rfc-editor.org/rfc/rfc8949.html#section-3.4.5.2), to + // interoperate with the existing JSON behavior. 
This indicates to the decoder that, + // when decoding into a string (or unstructured), the resulting value should be the + // base64 encoding of the original bytes. No base64 encoding or decoding needs to be + // performed for []byte-to-CBOR-to-[]byte roundtrips. + ByteSliceLaterFormat: cbor.ByteSliceLaterFormatBase64, + + // Disable default recognition of types implementing encoding.BinaryMarshaler, which + // is not recognized for JSON encoding. + BinaryMarshaler: cbor.BinaryMarshalerNone, + }.UserBufferEncMode() + if err != nil { + panic(err) + } + return encode + }(), +} + +var EncodeNondeterministic = EncMode{ + delegate: func() cbor.UserBufferEncMode { + opts := Encode.options() + opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0. + em, err := opts.UserBufferEncMode() + if err != nil { + panic(err) + } + return em + }(), +} + +type EncMode struct { + delegate cbor.UserBufferEncMode +} + +func (em EncMode) options() cbor.EncOptions { + return em.delegate.EncOptions() +} + +func (em EncMode) MarshalTo(v interface{}, w io.Writer) error { + if buf, ok := w.(*buffer); ok { + return em.delegate.MarshalToBuffer(v, &buf.Buffer) + } + + buf := buffers.Get() + defer buffers.Put(buf) + if err := em.delegate.MarshalToBuffer(v, &buf.Buffer); err != nil { + return err + } + + if _, err := io.Copy(w, buf); err != nil { + return err + } + + return nil +} + +func (em EncMode) Marshal(v interface{}) ([]byte, error) { + buf := buffers.Get() + defer buffers.Put(buf) + + if err := em.MarshalTo(v, &buf.Buffer); err != nil { + return nil, err + } + + clone := make([]byte, buf.Len()) + copy(clone, buf.Bytes()) + + return clone, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 21944f2d..ff982084 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -259,8 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { // invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). // // TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. -// -// All other callers will be forced to request a Codec directly. +// All other callers will be forced to request a Codec directly. 
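
Stepping back from the hunk for a moment: the encode and decode modes configured above are what the new direct package earlier in this diff exposes as Marshal, Unmarshal, and Diagnose. A rough usage sketch, assuming nothing beyond those three wrappers:

package main

import (
	"fmt"

	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)

func main() {
	in := map[string]interface{}{"replicas": int64(3), "paused": false}

	// Deterministic encoding: bytewise-sorted keys, field names as byte strings.
	data, err := cbor.Marshal(in)
	if err != nil {
		panic(err)
	}

	// Diagnostic notation is handy when asserting on CBOR output in tests.
	diag, err := cbor.Diagnose(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(diag)

	// Strict decoding: duplicate map keys, NaN/Inf, and unknown simple values are rejected.
	var out map[string]interface{}
	if err := cbor.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out)
}
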
func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 87b3fec3..971c46d4 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -134,23 +134,3 @@ func (e *encoder) Encode(obj runtime.Object) error { e.buf.Reset() return err } - -type encoderWithAllocator struct { - writer io.Writer - encoder runtime.EncoderWithAllocator - memAllocator runtime.MemoryAllocator -} - -// NewEncoderWithAllocator returns a new streaming encoder -func NewEncoderWithAllocator(w io.Writer, e runtime.EncoderWithAllocator, a runtime.MemoryAllocator) Encoder { - return &encoderWithAllocator{ - writer: w, - encoder: e, - memAllocator: a, - } -} - -// Encode writes the provided object to the nested writer -func (e *encoderWithAllocator) Encode(obj runtime.Object) error { - return e.encoder.EncodeWithAllocator(obj, e.writer, e.memAllocator) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index 44663318..25f955ed 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -147,7 +147,7 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{Decoder: c.decoder}); err != nil { if strictErr, ok := runtime.AsStrictDecodingError(err); ok { // save the strictDecodingError let and the caller decide what to do with it strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/splice.go b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go new file mode 100644 index 00000000..2badb7b9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go @@ -0,0 +1,76 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "io" +) + +// Splice is the interface that wraps the Splice method. +// +// Splice moves data from given slice without copying the underlying data for +// efficiency purpose. Therefore, the caller should make sure the underlying +// data is not changed later. +type Splice interface { + Splice([]byte) + io.Writer + Reset() + Bytes() []byte +} + +// A spliceBuffer implements Splice and io.Writer interfaces. 
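
For the Splice interface introduced above (its spliceBuffer implementation follows immediately below), a short illustrative sketch may help; nothing here is part of the vendored change:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	buf := runtime.NewSpliceBuffer()

	// Zero-copy path: hand an already-encoded payload to the buffer. The caller
	// must not mutate raw afterwards, since the buffer keeps a reference to it.
	raw := []byte(`{"kind":"Status"}`)
	buf.Splice(raw)
	fmt.Printf("%s\n", buf.Bytes()) // {"kind":"Status"}

	// Writer path: data written through io.Writer is copied into an internal
	// bytes.Buffer, and that copy is what Bytes() returns when present.
	buf.Reset()
	fmt.Fprint(buf, `{"kind":"Pod"}`)
	fmt.Printf("%s\n", buf.Bytes()) // {"kind":"Pod"}
}
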
+type spliceBuffer struct { + raw []byte + buf *bytes.Buffer +} + +func NewSpliceBuffer() Splice { + return &spliceBuffer{} +} + +// Splice implements the Splice interface. +func (sb *spliceBuffer) Splice(raw []byte) { + sb.raw = raw +} + +// Write implements the io.Writer interface. +func (sb *spliceBuffer) Write(p []byte) (n int, err error) { + if sb.buf == nil { + sb.buf = &bytes.Buffer{} + } + return sb.buf.Write(p) +} + +// Reset resets the buffer to be empty. +func (sb *spliceBuffer) Reset() { + if sb.buf != nil { + sb.buf.Reset() + } + sb.raw = nil +} + +// Bytes returns the data held by the buffer. +func (sb *spliceBuffer) Bytes() []byte { + if sb.buf != nil && len(sb.buf.Bytes()) > 0 { + return sb.buf.Bytes() + } + if sb.raw != nil { + return sb.raw + } + return []byte{} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 3dc9a5a2..1680c149 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -46,6 +46,7 @@ const ( ContentTypeJSON string = "application/json" ContentTypeYAML string = "application/yaml" ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeCBOR string = "application/cbor" ) // RawExtension is used to hold extensions in external versions. @@ -123,7 +124,7 @@ type Unknown struct { // Raw will hold the complete serialized object which couldn't be matched // with a registered type. Most likely, nothing should be done with this // except for passing it through the system. - Raw []byte `protobuf:"bytes,2,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,2,opt,name=raw"` // ContentEncoding is encoding used to encode 'Raw' data. // Unspecified means no encoding. ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index b19750f3..db18ce1c 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -37,3 +37,14 @@ const ( func (n NamespacedName) String() string { return n.Namespace + string(Separator) + n.Name } + +// MarshalLog emits a struct containing required key/value pair +func (n NamespacedName) MarshalLog() interface{} { + return struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` + }{ + Name: n.Name, + Namespace: n.Namespace, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go index 0d2f153b..1396274c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -40,6 +40,13 @@ func NewExpiringWithClock(clock clock.Clock) *Expiring { // Expiring is a map whose entries expire after a per-entry timeout. type Expiring struct { + // AllowExpiredGet causes the expiration check to be skipped on Get. + // It should only be used when a key always corresponds to the exact same value. + // Thus when this field is true, expired keys are considered valid + // until the next call to Set (which causes the GC to run). + // It may not be changed concurrently with calls to Get. 
+ AllowExpiredGet bool + clock clock.Clock // mu protects the below fields @@ -70,7 +77,10 @@ func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { c.mu.RLock() defer c.mu.RUnlock() e, ok := c.cache[key] - if !ok || !c.clock.Now().Before(e.expiry) { + if !ok { + return nil, false + } + if !c.AllowExpiredGet && !c.clock.Now().Before(e.expiry) { return nil, false } return e.val, true diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go index 1328dd61..ad486d58 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -136,6 +136,19 @@ func (c *LRUExpireCache) Remove(key interface{}) { delete(c.entries, key) } +// RemoveAll removes all keys that match predicate. +func (c *LRUExpireCache) RemoveAll(predicate func(key any) bool) { + c.lock.Lock() + defer c.lock.Unlock() + + for key, element := range c.entries { + if predicate(key) { + c.evictionList.Remove(element) + delete(c.entries, key) + } + } +} + // Keys returns all unexpired keys in the cache. // // Keep in mind that subsequent calls to Get() for any of the returned keys diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index ec4002e3..fc030184 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -23,34 +23,20 @@ import ( "strings" "text/tabwriter" - "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/util/dump" ) -// StringDiff diffs a and b and returns a human readable diff. -func StringDiff(a, b string) string { - ba := []byte(a) - bb := []byte(b) - out := []byte{} - i := 0 - for ; i < len(ba) && i < len(bb); i++ { - if ba[i] != bb[i] { - break - } - out = append(out, ba[i]) - } - out = append(out, []byte("\n\nA: ")...) - out = append(out, ba[i:]...) - out = append(out, []byte("\n\nB: ")...) - out = append(out, bb[i:]...) - out = append(out, []byte("\n\n")...) - return string(out) -} - func legacyDiff(a, b interface{}) string { return cmp.Diff(a, b) } +// StringDiff diffs a and b and returns a human readable diff. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func StringDiff(a, b string) string { + return legacyDiff(a, b) +} + // ObjectDiff prints the diff of two go objects and fails if the objects // contain unhandled unexported fields. // DEPRECATED: use github.com/google/go-cmp/cmp.Diff @@ -75,13 +61,8 @@ func ObjectReflectDiff(a, b interface{}) string { // ObjectGoPrintSideBySide prints a and b as textual dumps side by side, // enabling easy visual scanning for mismatches. func ObjectGoPrintSideBySide(a, b interface{}) string { - s := spew.ConfigState{ - Indent: " ", - // Extra deep spew. - DisableMethods: true, - } - sA := s.Sdump(a) - sB := s.Sdump(b) + sA := dump.Pretty(a) + sB := dump.Pretty(b) linesA := strings.Split(sA, "\n") linesB := strings.Split(sB, "\n") diff --git a/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go new file mode 100644 index 00000000..cf61ef76 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
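
As an aside on the cache changes above (AllowExpiredGet on Expiring, RemoveAll on LRUExpireCache), a small illustrative sketch of the new predicate-based eviction; the NewLRUExpireCache, Add, and Keys calls are assumed from the package's existing API and are not part of this diff:

package main

import (
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	c := cache.NewLRUExpireCache(128)
	c.Add("token/alice", "t1", time.Minute)
	c.Add("token/bob", "t2", time.Minute)
	c.Add("config/global", "v1", time.Minute)

	// RemoveAll drops every entry whose key matches the predicate.
	c.RemoveAll(func(key any) bool {
		s, ok := key.(string)
		return ok && strings.HasPrefix(s, "token/")
	})

	fmt.Println(c.Keys()) // only config/global remains
}
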
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dump + +import ( + "github.com/davecgh/go-spew/spew" +) + +var prettyPrintConfig = &spew.ConfigState{ + Indent: " ", + DisableMethods: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// The config MUST NOT be changed because that could change the result of a hash operation +var prettyPrintConfigForHash = &spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + DisablePointerAddresses: true, + DisableCapacities: true, +} + +// Pretty wrap the spew.Sdump with Indent, and disabled methods like error() and String() +// The output may change over time, so for guaranteed output please take more direct control +func Pretty(a interface{}) string { + return prettyPrintConfig.Sdump(a) +} + +// ForHash keeps the original Spew.Sprintf format to ensure the same checksum +func ForHash(a interface{}) string { + return prettyPrintConfigForHash.Sprintf("%#v", a) +} + +// OneLine outputs the object in one line +func OneLine(a interface{}) string { + return prettyPrintConfig.Sprintf("%#v", a) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go b/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go index c31b2a0c..a20136a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go @@ -21,7 +21,7 @@ import ( "time" ) -// ShortHumanDuration returns a succint representation of the provided duration +// ShortHumanDuration returns a succinct representation of the provided duration // with limited precision for consumption by humans. func ShortHumanDuration(d time.Duration) string { // Allow deviation no more than 2 seconds(excluded) to tolerate machine time @@ -42,7 +42,7 @@ func ShortHumanDuration(d time.Duration) string { return fmt.Sprintf("%dy", int(d.Hours()/24/365)) } -// HumanDuration returns a succint representation of the provided duration +// HumanDuration returns a succinct representation of the provided duration // with limited precision for consumption by humans. It provides ~2-3 significant // figures of duration. func HumanDuration(d time.Duration) string { diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 1f5a04fd..1b60d145 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -214,7 +214,7 @@ func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { return NewAggregate(result) } -// Reduce will return err or, if err is an Aggregate and only has one item, +// Reduce will return err or nil, if err is an Aggregate and only has one item, // the first item in the aggregate. 
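
The clarified Reduce comment above reads most easily next to a concrete case; the sketch below is illustrative only and assumes the package's existing NewAggregate constructor.

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	single := utilerrors.NewAggregate([]error{errors.New("spec.replicas: required")})
	fmt.Println(utilerrors.Reduce(single)) // the lone wrapped error, unwrapped from the Aggregate

	several := utilerrors.NewAggregate([]error{errors.New("a"), errors.New("b")})
	fmt.Println(utilerrors.Reduce(several)) // the Aggregate itself, passed through unchanged

	fmt.Println(utilerrors.Reduce(nil)) // nil stays nil
}
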
func Reduce(err error) error { if agg, ok := err.(Aggregate); ok && err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index ca08f856..1ab8fd39 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -32,7 +32,7 @@ func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { return &lengthDelimitedFrameWriter{w: w} } -// Write writes a single frame to the nested writer, prepending it with the length in +// Write writes a single frame to the nested writer, prepending it with the length // in bytes of data (as a 4 byte, bigendian uint32). func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) @@ -147,7 +147,6 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see // data written to data, or be larger than data and a different array. - n := len(data) m := json.RawMessage(data[:0]) if err := r.decoder.Decode(&m); err != nil { return 0, err @@ -156,12 +155,19 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) { // If capacity of data is less than length of the message, decoder will allocate a new slice // and set m to it, which means we need to copy the partial result back into data and preserve // the remaining result for subsequent reads. - if len(m) > n { - //nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here. - data = append(data[0:0], m[:n]...) - r.remaining = m[n:] - return n, io.ErrShortBuffer + if len(m) > cap(data) { + copy(data, m) + r.remaining = m[len(data):] + return len(data), io.ErrShortBuffer } + + if len(m) > len(data) { + // The bytes beyond len(data) were stored in data's underlying array, which we do + // not own after this function returns. + r.remaining = append([]byte(nil), m[len(data):]...) + return len(data), io.ErrShortBuffer + } + return len(m), nil } diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 8f9ced93..1f287739 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
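
Before moving on to the intstr hunk: the jsonFrameReader change above alters how frames larger than the caller's buffer are split across reads, so a caller loop along these lines may help. It assumes the package's NewJSONFramedReader constructor, which is not shown in the hunk.

package main

import (
	"fmt"
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	r := framer.NewJSONFramedReader(io.NopCloser(strings.NewReader(
		`{"kind":"Pod"}{"kind":"Status"}`,
	)))

	buf := make([]byte, 8) // deliberately smaller than a frame
	frame := []byte{}
	for {
		n, err := r.Read(buf)
		frame = append(frame, buf[:n]...)
		switch {
		case err == io.ErrShortBuffer:
			// The reader retained the rest of the current frame; keep reading it.
			continue
		case err == io.EOF:
			return
		case err != nil:
			panic(err)
		}
		fmt.Printf("%s\n", frame) // one complete JSON document per frame
		frame = frame[:0]
	}
}
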
-// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// source: k8s.io/apimachinery/pkg/util/intstr/generated.proto package intstr @@ -43,7 +43,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *IntOrString) Reset() { *m = IntOrString{} } func (*IntOrString) ProtoMessage() {} func (*IntOrString) Descriptor() ([]byte, []int) { - return fileDescriptor_94e046ae3ce6121c, []int{0} + return fileDescriptor_771bacc35a5ec189, []int{0} } func (m *IntOrString) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -73,30 +73,29 @@ func init() { } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) + proto.RegisterFile("k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_771bacc35a5ec189) } -var fileDescriptor_94e046ae3ce6121c = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xb1, 0x4a, 0x03, 0x31, - 0x1c, 0xc6, 0x13, 0x5b, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x58, 0x90, 0x5b, 0x4c, - 0x56, 0x71, 0xec, 0x56, 0x10, 0x84, 0x56, 0x1c, 0xdc, 0xee, 0xda, 0x98, 0x86, 0x6b, 0x93, 0x90, - 0xfb, 0x9f, 0x70, 0x5b, 0x1f, 0x41, 0x37, 0x47, 0x1f, 0xe7, 0xc6, 0x8e, 0x1d, 0xa4, 0x78, 0xf1, - 0x2d, 0x9c, 0xe4, 0x72, 0x07, 0x3a, 0x3a, 0x25, 0xdf, 0xf7, 0xfd, 0x7e, 0x19, 0x12, 0xdc, 0xa6, - 0xd7, 0x19, 0x95, 0x9a, 0xa5, 0x79, 0xc2, 0xad, 0xe2, 0xc0, 0x33, 0xf6, 0xcc, 0xd5, 0x42, 0x5b, - 0xd6, 0x0e, 0xb1, 0x91, 0xeb, 0x78, 0xbe, 0x94, 0x8a, 0xdb, 0x82, 0x99, 0x54, 0xb0, 0x1c, 0xe4, - 0x8a, 0x49, 0x05, 0x19, 0x58, 0x26, 0xb8, 0xe2, 0x36, 0x06, 0xbe, 0xa0, 0xc6, 0x6a, 0xd0, 0xfd, - 0x51, 0x23, 0xd1, 0xbf, 0x12, 0x35, 0xa9, 0xa0, 0xb5, 0x44, 0x1b, 0xe9, 0xfc, 0x4a, 0x48, 0x58, - 0xe6, 0x09, 0x9d, 0xeb, 0x35, 0x13, 0x5a, 0x68, 0xe6, 0xdd, 0x24, 0x7f, 0xf2, 0xc9, 0x07, 0x7f, - 0x6b, 0xde, 0xbc, 0x78, 0xc5, 0xc1, 0xc9, 0x44, 0xc1, 0x9d, 0x9d, 0x81, 0x95, 0x4a, 0xf4, 0xa3, - 0xa0, 0x0b, 0x85, 0xe1, 0x03, 0x1c, 0xe2, 0xa8, 0x33, 0x3e, 0x2b, 0xf7, 0x43, 0xe4, 0xf6, 0xc3, - 0xee, 0x7d, 0x61, 0xf8, 0x77, 0x7b, 0x4e, 0x3d, 0xd1, 0xbf, 0x0c, 0x7a, 0x52, 0xc1, 0x43, 0xbc, - 0x1a, 0x1c, 0x84, 0x38, 0x3a, 0x1c, 0x9f, 0xb6, 0x6c, 0x6f, 0xe2, 0xdb, 0x69, 0xbb, 0xd6, 0x5c, - 0x06, 0xb6, 0xe6, 0x3a, 0x21, 0x8e, 0x8e, 0x7f, 0xb9, 0x99, 0x6f, 0xa7, 0xed, 0x7a, 0x73, 0xf4, - 0xf6, 0x3e, 0x44, 0x9b, 0x8f, 0x10, 0x8d, 0x27, 0x65, 0x45, 0xd0, 0xb6, 0x22, 0x68, 0x57, 0x11, - 0xb4, 0x71, 0x04, 0x97, 0x8e, 0xe0, 0xad, 0x23, 0x78, 0xe7, 0x08, 0xfe, 0x74, 0x04, 0xbf, 0x7c, - 0x11, 0xf4, 0x38, 0xfa, 0xc7, 0x17, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xdc, 0xc4, 0xf0, 0xa0, - 0x81, 0x01, 0x00, 0x00, +var fileDescriptor_771bacc35a5ec189 = []byte{ + // 277 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xce, 0xb6, 0x28, 0xd6, + 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, + 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2d, 0xc9, 0xcc, 0xd1, 0xcf, 0xcc, 0x2b, 0x29, 0x2e, 0x29, 0xd2, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x52, 0x86, 0x68, 0xd2, 0x43, 0xd6, 0xa4, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd2, 0xa4, 0x07, 0xd1, + 0x24, 0xa5, 0x9b, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x9e, 0x9f, + 0x9e, 0xaf, 0x0f, 0xd6, 0x9b, 0x54, 0x9a, 0x06, 0xe6, 0x81, 0x39, 0x60, 0x16, 0xc4, 0x4c, 0xa5, + 
0x89, 0x8c, 0x5c, 0xdc, 0x9e, 0x79, 0x25, 0xfe, 0x45, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x42, + 0x1a, 0x5c, 0x2c, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x4e, 0x22, 0x27, + 0xee, 0xc9, 0x33, 0x3c, 0xba, 0x27, 0xcf, 0x12, 0x52, 0x59, 0x90, 0xfa, 0x0b, 0x4a, 0x07, 0x81, + 0x55, 0x08, 0xa9, 0x71, 0xb1, 0x65, 0xe6, 0x95, 0x84, 0x25, 0xe6, 0x48, 0x30, 0x29, 0x30, 0x6a, + 0xb0, 0x3a, 0xf1, 0x41, 0xd5, 0xb2, 0x79, 0x82, 0x45, 0x83, 0xa0, 0xb2, 0x20, 0x75, 0xc5, 0x25, + 0x45, 0x20, 0x75, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x08, 0x75, 0xc1, 0x60, 0xd1, 0x20, 0xa8, 0xac, + 0x15, 0xc7, 0x8c, 0x05, 0xf2, 0x0c, 0x0d, 0x77, 0x14, 0x18, 0x9c, 0x3c, 0x4f, 0x3c, 0x94, 0x63, + 0xb8, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, + 0x31, 0x5e, 0x78, 0x24, 0xc7, 0x78, 0xe3, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, + 0xcb, 0x31, 0x44, 0x29, 0x13, 0x11, 0x84, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0xa1, 0x0b, + 0x1e, 0x68, 0x01, 0x00, 0x00, } func (m *IntOrString) Marshal() (dAtA []byte, err error) { diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 5e800970..5fd2e16c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" + cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct" "k8s.io/klog/v2" ) @@ -54,7 +55,7 @@ const ( // FromInt creates an IntOrString object with an int32 value. It is // your responsibility not to call this method with a value greater // than int32. -// TODO: convert to (val int32) +// Deprecated: use FromInt32 instead. func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) @@ -62,19 +63,24 @@ func FromInt(val int) IntOrString { return IntOrString{Type: Int, IntVal: int32(val)} } +// FromInt32 creates an IntOrString object with an int32 value. +func FromInt32(val int32) IntOrString { + return IntOrString{Type: Int, IntVal: val} +} + // FromString creates an IntOrString object with a string value. func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } -// Parse the given string and try to convert it to an integer before +// Parse the given string and try to convert it to an int32 integer before // setting it as a string value. func Parse(val string) IntOrString { - i, err := strconv.Atoi(val) + i, err := strconv.ParseInt(val, 10, 32) if err != nil { return FromString(val) } - return FromInt(i) + return FromInt32(int32(i)) } // UnmarshalJSON implements the json.Unmarshaller interface. @@ -87,6 +93,20 @@ func (intstr *IntOrString) UnmarshalJSON(value []byte) error { return json.Unmarshal(value, &intstr.IntVal) } +func (intstr *IntOrString) UnmarshalCBOR(value []byte) error { + if err := cbor.Unmarshal(value, &intstr.StrVal); err == nil { + intstr.Type = String + return nil + } + + if err := cbor.Unmarshal(value, &intstr.IntVal); err != nil { + return err + } + + intstr.Type = Int + return nil +} + // String returns the string value, or the Itoa of the int value. 
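
To round out the intstr changes above (FromInt32, the 32-bit Parse, and the new CBOR hooks), a brief sketch of how they compose with the direct CBOR helpers added earlier in this diff; the Int and String type constants are assumed from the package's existing API:

package main

import (
	"fmt"

	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	port := intstr.FromInt32(8080)    // preferred over the deprecated FromInt
	name := intstr.Parse("https")     // not an integer, stored as the string "https"
	big := intstr.Parse("3000000000") // overflows int32, so it is also kept as a string
	fmt.Println(port.Type == intstr.Int, name.Type == intstr.String, big.Type == intstr.String)

	// MarshalCBOR writes the int branch as a CBOR integer and the string branch as a
	// CBOR string; UnmarshalCBOR tries the string branch first, then falls back to int32.
	data, err := cbor.Marshal(port)
	if err != nil {
		panic(err)
	}
	var out intstr.IntOrString
	if err := cbor.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.IntVal) // 8080
}
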
func (intstr *IntOrString) String() string { if intstr == nil { @@ -121,6 +141,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } +func (intstr IntOrString) MarshalCBOR() ([]byte, error) { + switch intstr.Type { + case Int: + return cbor.Marshal(intstr.IntVal) + case String: + return cbor.Marshal(intstr.StrVal) + default: + return nil, fmt.Errorf("impossible IntOrString.Type") + } +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml new file mode 100644 index 00000000..a667e983 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml @@ -0,0 +1,7018 @@ +apiVersion: v1 +kind: Endpoints +metadata: + creationTimestamp: '2016-10-04T17:45:58Z' + labels: + app: my-app + name: app-server + namespace: default + resourceVersion: '184597135' + selfLink: /self/link + uid: 6826f086-8a5a-11e6-8d09-42010a800005 +subsets: +- addresses: + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0000 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0001 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0002 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0003 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0004 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0005 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0006 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0007 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0008 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0009 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0010 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0011 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0012 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0013 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0014 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555
+  - ip: 10.0.0.1
+    targetRef:
+      kind: Pod
+      name: pod-name-1234-0015
+      namespace: default
+      resourceVersion: '1234567890'
+      uid: 11111111-2222-3333-4444-555555555555
[... the addresses list continues with identical entries for pod-name-1234-0016 through pod-name-1234-0419: each added line repeats ip 10.0.0.1, namespace default, resourceVersion '1234567890', and the same placeholder uid, differing only in the sequential pod name ...]
+  - ip: 10.0.0.1
+    targetRef:
kind: Pod + name: pod-name-1234-0420 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0421 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0422 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0423 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0424 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0425 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0426 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0427 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0428 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0429 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0430 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0431 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0432 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0433 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0434 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0435 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0436 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0437 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0438 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0439 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0440 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0441 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0442 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0443 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0444 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0445 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0446 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0447 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0448 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0449 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0450 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0451 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0452 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0453 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0454 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0455 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0456 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0457 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0458 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0459 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0460 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0461 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0462 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0463 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0464 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0465 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0466 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0467 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0468 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0469 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0470 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0471 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0472 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0473 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0474 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0475 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0476 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0477 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0478 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0479 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0480 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0481 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0482 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0483 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0484 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0485 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0486 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0487 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0488 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0489 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0490 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0491 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0492 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0493 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0494 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0495 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0496 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0497 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0498 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0499 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0500 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0501 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0502 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0503 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0504 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0505 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0506 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0507 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0508 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0509 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0510 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0511 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0512 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0513 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0514 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0515 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0516 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0517 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0518 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0519 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0520 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0521 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0522 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0523 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0524 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0525 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0526 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0527 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0528 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0529 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0530 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0531 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0532 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0533 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0534 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0535 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0536 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0537 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0538 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0539 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0540 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0541 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0542 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0543 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0544 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0545 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0546 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0547 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0548 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0549 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0550 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0551 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0552 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0553 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0554 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0555 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0556 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0557 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0558 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0559 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0560 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0561 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0562 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0563 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0564 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0565 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0566 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0567 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0568 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0569 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0570 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0571 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0572 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0573 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0574 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0575 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0576 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0577 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0578 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0579 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0580 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0581 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0582 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0583 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0584 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0585 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0586 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0587 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0588 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0589 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0590 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0591 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0592 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0593 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0594 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0595 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0596 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0597 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0598 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0599 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0600 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0601 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0602 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0603 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0604 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0605 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0606 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0607 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0608 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0609 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0610 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0611 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0612 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0613 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0614 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0615 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0616 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0617 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0618 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0619 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0620 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0621 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0622 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0623 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0624 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0625 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0626 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0627 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0628 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0629 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0630 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0631 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0632 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0633 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0634 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0635 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0636 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0637 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0638 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0639 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0640 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0641 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0642 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0643 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0644 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0645 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0646 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0647 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0648 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0649 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0650 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0651 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0652 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0653 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0654 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0655 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0656 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0657 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0658 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0659 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0660 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0661 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0662 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0663 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0664 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0665 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0666 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0667 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0668 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0669 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0670 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0671 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0672 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0673 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0674 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0675 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0676 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0677 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0678 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0679 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0680 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0681 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0682 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0683 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0684 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0685 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0686 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0687 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0688 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0689 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0690 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0691 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0692 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0693 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0694 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0695 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0696 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0697 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0698 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0699 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0700 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0701 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0702 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0703 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0704 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0705 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0706 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0707 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0708 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0709 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0710 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0711 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0712 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0713 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0714 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0715 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0716 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0717 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0718 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0719 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0720 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0721 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0722 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0723 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0724 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0725 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0726 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0727 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0728 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0729 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0730 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0731 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0732 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0733 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0734 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0735 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0736 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0737 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0738 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0739 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0740 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0741 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0742 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0743 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0744 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0745 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0746 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0747 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0748 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0749 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0750 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0751 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0752 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0753 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0754 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0755 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0756 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0757 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0758 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0759 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0760 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0761 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0762 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0763 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0764 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0765 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0766 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0767 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0768 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0769 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0770 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0771 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0772 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0773 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0774 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0775 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0776 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0777 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0778 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0779 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0780 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0781 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0782 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0783 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0784 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0785 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0786 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0787 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0788 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0789 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0790 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0791 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0792 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0793 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0794 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0795 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0796 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0797 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0798 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0799 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0800 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0801 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0802 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0803 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0804 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0805 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0806 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0807 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0808 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0809 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0810 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0811 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0812 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0813 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0814 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0815 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0816 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0817 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0818 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0819 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0820 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0821 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0822 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0823 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0824 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0825 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0826 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0827 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0828 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0829 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0830 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0831 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0832 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0833 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0834 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0835 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0836 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0837 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0838 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0839 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0840 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0841 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0842 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0843 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0844 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0845 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0846 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0847 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0848 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0849 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0850 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0851 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0852 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0853 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0854 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0855 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0856 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0857 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0858 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0859 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0860 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0861 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0862 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0863 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0864 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0865 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0866 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0867 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0868 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0869 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0870 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0871 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0872 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0873 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0874 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0875 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0876 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0877 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0878 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0879 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0880 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0881 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0882 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0883 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0884 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0885 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0886 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0887 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0888 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0889 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0890 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0891 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0892 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0893 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0894 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0895 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0896 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0897 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0898 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0899 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0900 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0901 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0902 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0903 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0904 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0905 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0906 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0907 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0908 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0909 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0910 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0911 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0912 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0913 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0914 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0915 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0916 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0917 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0918 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0919 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0920 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0921 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0922 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0923 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0924 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0925 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0926 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0927 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0928 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0929 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0930 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0931 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0932 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0933 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0934 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0935 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0936 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0937 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0938 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0939 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0940 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0941 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0942 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0943 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0944 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0945 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0946 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0947 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0948 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0949 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0950 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0951 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0952 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0953 + namespace: default + 
resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0954 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0955 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0956 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0957 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0958 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0959 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0960 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0961 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0962 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0963 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0964 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0965 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0966 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0967 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0968 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0969 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0970 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0971 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0972 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0973 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0974 + namespace: default + resourceVersion: '1234567890' + uid: 
11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0975 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0976 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0977 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0978 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0979 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0980 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0981 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0982 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0983 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0984 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0985 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0986 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0987 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0988 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0989 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0990 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0991 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0992 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0993 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0994 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0995 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + 
kind: Pod + name: pod-name-1234-0996 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0997 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0998 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + - ip: 10.0.0.1 + targetRef: + kind: Pod + name: pod-name-1234-0999 + namespace: default + resourceVersion: '1234567890' + uid: 11111111-2222-3333-4444-555555555555 + ports: + - name: port-name + port: 8080 + protocol: TCP +
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
new file mode 100644
index 00000000..978ffb3c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package managedfields
+
+import (
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/util/managedfields/internal"
+    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+)
+
+// FieldManager updates the managed fields and merges applied
+// configurations.
+type FieldManager = internal.FieldManager
+
+// NewDefaultFieldManager creates a new FieldManager that merges apply requests
+// and updates managed fields for other types of requests.
+func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) {
+    f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create field manager: %v", err)
+    }
+    return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
+}
+
+// NewDefaultCRDFieldManager creates a new FieldManager specifically for
+// CRDs. This allows for the possibility of fields which are not defined
+// in models, as well as having no models defined at all.
+func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
+    f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
+    if err != nil {
+        return nil, fmt.Errorf("failed to create field manager: %v", err)
+    }
+    return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
+}
+
+func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error {
+    _, err := internal.DecodeManagedFields(encodedManagedFields)
+    return err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go
new file mode 100644
index 00000000..b75ef741
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "sync"
+    "time"
+)
+
+// AtMostEvery will never run the method more than once every specified
+// duration.
+type AtMostEvery struct {
+    delay    time.Duration
+    lastCall time.Time
+    mutex    sync.Mutex
+}
+
+// NewAtMostEvery creates a new AtMostEvery, that will run the method at
+// most every given duration.
+func NewAtMostEvery(delay time.Duration) *AtMostEvery {
+    return &AtMostEvery{
+        delay: delay,
+    }
+}
+
+// updateLastCall returns true if the lastCall time has been updated,
+// false if it was too early.
+func (s *AtMostEvery) updateLastCall() bool {
+    s.mutex.Lock()
+    defer s.mutex.Unlock()
+    if time.Since(s.lastCall) < s.delay {
+        return false
+    }
+    s.lastCall = time.Now()
+    return true
+}
+
+// Do will run the method if enough time has passed, and return true.
+// Otherwise, it does nothing and returns false.
+func (s *AtMostEvery) Do(fn func()) bool {
+    if !s.updateLastCall() {
+        return false
+    }
+    fn()
+    return true
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go
new file mode 100644
index 00000000..fa342ca1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type buildManagerInfoManager struct {
+    fieldManager Manager
+    groupVersion schema.GroupVersion
+    subresource  string
+}
+
+var _ Manager = &buildManagerInfoManager{}
+
+// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier
+// combining operation and version for update requests, and just operation for apply requests.
+func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion, subresource string) Manager {
+    return &buildManagerInfoManager{
+        fieldManager: f,
+        groupVersion: gv,
+        subresource:  subresource,
+    }
+}
+
+// Update implements Manager.
+func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
+    manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate)
+    if err != nil {
+        return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
+    }
+    return f.fieldManager.Update(liveObj, newObj, managed, manager)
+}
+
+// Apply implements Manager.
+func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
+    manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply)
+    if err != nil {
+        return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
+    }
+    return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
+}
+
+func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) {
+    managerInfo := metav1.ManagedFieldsEntry{
+        Manager:     prefix,
+        Operation:   operation,
+        APIVersion:  f.groupVersion.String(),
+        Subresource: f.subresource,
+    }
+    if managerInfo.Manager == "" {
+        managerInfo.Manager = "unknown"
+    }
+    return BuildManagerIdentifier(&managerInfo)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go
new file mode 100644
index 00000000..8951932b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "fmt"
+    "sort"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+)
+
+type capManagersManager struct {
+    fieldManager          Manager
+    maxUpdateManagers     int
+    oldUpdatesManagerName string
+}
+
+var _ Manager = &capManagersManager{}
+
+// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates
+// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update.
+func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager {
+    return &capManagersManager{
+        fieldManager:          fieldManager,
+        maxUpdateManagers:     maxUpdateManagers,
+        oldUpdatesManagerName: "ancient-changes",
+    }
+}
+
+// Update implements Manager.
+func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
+    object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager)
+    if err != nil {
+        return object, managed, err
+    }
+    if managed, err = f.capUpdateManagers(managed); err != nil {
+        return nil, nil, fmt.Errorf("failed to cap update managers: %v", err)
+    }
+    return object, managed, nil
+}
+
+// Apply implements Manager.
+func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
+    return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
+}
+
+// capUpdateManagers merges a number of the oldest update entries into versioned buckets,
+// such that the number of entries from updates does not exceed f.maxUpdateManagers.
+func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) {
+    // Gather all entries from updates
+    updaters := []string{}
+    for manager, fields := range managed.Fields() {
+        if !fields.Applied() {
+            updaters = append(updaters, manager)
+        }
+    }
+    if len(updaters) <= f.maxUpdateManagers {
+        return managed, nil
+    }
+
+    // If we have more than the maximum, sort the update entries by time, oldest first.
+    sort.Slice(updaters, func(i, j int) bool {
+        iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0)
+        if iTime != nil {
+            iSeconds = iTime.Unix()
+        }
+        if jTime != nil {
+            jSeconds = jTime.Unix()
+        }
+        if iSeconds != jSeconds {
+            return iSeconds < jSeconds
+        }
+        return updaters[i] < updaters[j]
+    })
+
+    // Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap
+    versionToFirstManager := map[string]string{}
+    for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ {
+        manager := updaters[i]
+        vs := managed.Fields()[manager]
+        time := managed.Times()[manager]
+        version := string(vs.APIVersion())
+
+        // Create a new manager identifier for the versioned bucket entry.
+        // The version for this manager comes from the version of the update being merged into the bucket.
+        bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{
+            Manager:    f.oldUpdatesManagerName,
+            Operation:  metav1.ManagedFieldsOperationUpdate,
+            APIVersion: version,
+        })
+        if err != nil {
+            return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err)
+        }
+
+        // Merge the field sets if this is not the first time the version was seen.
+        // Otherwise just record the manager name in versionToFirstManager
+        if first, ok := versionToFirstManager[version]; ok {
+            // If the bucket doesn't exist yet, create one.
+            if _, ok := managed.Fields()[bucket]; !ok {
+                s := managed.Fields()[first]
+                delete(managed.Fields(), first)
+                managed.Fields()[bucket] = s
+            }
+
+            managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied())
+            delete(managed.Fields(), manager)
+            length--
+
+            // Use the time from the update being merged into the bucket, since it is more recent.
+            managed.Times()[bucket] = time
+        } else {
+            versionToFirstManager[version] = manager
+        }
+    }
+
+    return managed, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go
new file mode 100644
index 00000000..8c044c91
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "encoding/json"
+    "fmt"
+    "sort"
+    "strings"
+    "time"
+
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+    "sigs.k8s.io/structured-merge-diff/v4/merge"
+)
+
+// NewConflictError returns an error including details on the request's apply conflicts
+func NewConflictError(conflicts merge.Conflicts) *errors.StatusError {
+    causes := []metav1.StatusCause{}
+    for _, conflict := range conflicts {
+        causes = append(causes, metav1.StatusCause{
+            Type:    metav1.CauseTypeFieldManagerConflict,
+            Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)),
+            Field:   conflict.Path.String(),
+        })
+    }
+    return errors.NewApplyConflict(causes, getConflictMessage(conflicts))
+}
+
+func getConflictMessage(conflicts merge.Conflicts) string {
+    if len(conflicts) == 1 {
+        return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path)
+    }
+
+    m := map[string][]fieldpath.Path{}
+    for _, conflict := range conflicts {
+        m[conflict.Manager] = append(m[conflict.Manager], conflict.Path)
+    }
+
+    uniqueManagers := []string{}
+    for manager := range m {
+        uniqueManagers = append(uniqueManagers, manager)
+    }
+
+    // Print conflicts by sorted managers.
+    sort.Strings(uniqueManagers)
+
+    messages := []string{}
+    for _, manager := range uniqueManagers {
+        messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager)))
+        for _, path := range m[manager] {
+            messages = append(messages, fmt.Sprintf("- %v", path))
+        }
+    }
+    return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n"))
+}
+
+func printManager(manager string) string {
+    encodedManager := &metav1.ManagedFieldsEntry{}
+    if err := json.Unmarshal([]byte(manager), encodedManager); err != nil {
+        return fmt.Sprintf("%q", manager)
+    }
+    managerStr := fmt.Sprintf("%q", encodedManager.Manager)
+    if encodedManager.Subresource != "" {
+        managerStr = fmt.Sprintf("%s with subresource %q", managerStr, encodedManager.Subresource)
+    }
+    if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate {
+        if encodedManager.Time == nil {
+            return fmt.Sprintf("%s using %v", managerStr, encodedManager.APIVersion)
+        }
+        return fmt.Sprintf("%s using %v at %v", managerStr, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339))
+    }
+    return managerStr
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
new file mode 100644
index 00000000..eca04a71
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+    "fmt"
+    "reflect"
+    "time"
+
+    "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/klog/v2"
+    "sigs.k8s.io/structured-merge-diff/v4/merge"
+)
+
+// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates
+// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum.
+// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries.
+const DefaultMaxUpdateManagers int = 10
+
+// DefaultTrackOnCreateProbability defines the default probability that the field management of an object
+// starts being tracked from the object's creation, instead of from the first time the object is applied to.
+const DefaultTrackOnCreateProbability float32 = 1
+
+var atMostEverySecond = NewAtMostEvery(time.Second)
+
+// FieldManager updates the managed fields and merges applied
+// configurations.
+type FieldManager struct {
+    fieldManager Manager
+    subresource  string
+}
+
+// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields
+// on update and apply requests.
+func NewFieldManager(f Manager, subresource string) *FieldManager { + return &FieldManager{fieldManager: f, subresource: subresource} +} + +// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic. +func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager { + return NewFieldManager( + NewVersionCheckManager( + NewLastAppliedUpdater( + NewLastAppliedManager( + NewProbabilisticSkipNonAppliedManager( + NewCapManagersManager( + NewBuildManagerInfoManager( + NewManagedFieldsUpdater( + NewStripMetaManager(f), + ), kind.GroupVersion(), subresource, + ), DefaultMaxUpdateManagers, + ), objectCreater, DefaultTrackOnCreateProbability, + ), typeConverter, objectConverter, kind.GroupVersion(), + ), + ), kind, + ), subresource, + ) +} + +func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) { + liveAccessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, err + } + + // We take the managedFields of the live object in case the request tries to + // manually set managedFields via a subresource. + if ignoreManagedFieldsFromRequestObject { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + + // If the object doesn't have metadata, we should just return without trying to + // set the managedFields at all, so creates/updates/patches will work normally. + newAccessor, err := meta.Accessor(newObj) + if err != nil { + return nil, err + } + + if isResetManagedFields(newAccessor.GetManagedFields()) { + return NewEmptyManaged(), nil + } + + // If the managed field is empty or we failed to decode it, + // let's try the live object. This is to prevent clients who + // don't understand managedFields from deleting it accidentally. + managed, err := DecodeManagedFields(newAccessor.GetManagedFields()) + if err != nil || len(managed.Fields()) == 0 { + return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields())) + } + return managed, nil +} + +func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) { + if err != nil { + return NewEmptyManaged(), nil + } + return managed, nil +} + +// Update is used when the object has already been merged (non-apply +// use-case), and simply updates the managed fields in the output +// object. +func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) { + // First try to decode the managed fields provided in the update, + // This is necessary to allow directly updating managed fields. + isSubresource := f.subresource != "" + managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource) + if err != nil { + return newObj, nil + } + + RemoveObjectManagedFields(newObj) + + if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil { + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} + +// UpdateNoErrors is the same as Update, but it will not return +// errors. If an error happens, the object is returned with +// managedFields cleared. 
+func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object { + obj, err := f.Update(liveObj, newObj, manager) + if err != nil { + atMostEverySecond.Do(func() { + ns, name := "unknown", "unknown" + if accessor, err := meta.Accessor(newObj); err == nil { + ns = accessor.GetNamespace() + name = accessor.GetName() + } + + klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind", + newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name) + }) + // Explicitly remove managedFields on failure, so that + // we can't have garbage in it. + RemoveObjectManagedFields(newObj) + return newObj + } + return obj +} + +// Returns true if the managedFields indicate that the user is trying to +// reset the managedFields, i.e. if the list is non-nil but empty, or if +// the list has one empty item. +func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool { + if len(managedFields) == 0 { + return managedFields != nil + } + + if len(managedFields) == 1 { + return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{}) + } + + return false +} + +// Apply is used when server-side apply is called, as it merges the +// object and updates the managed fields. +func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) { + // If the object doesn't have metadata, apply isn't allowed. + accessor, err := meta.Accessor(liveObj) + if err != nil { + return nil, fmt.Errorf("couldn't get accessor: %v", err) + } + + // Decode the managed fields in the live object, since it isn't allowed in the patch. + managed, err := DecodeManagedFields(accessor.GetManagedFields()) + if err != nil { + return nil, fmt.Errorf("failed to decode managed fields: %v", err) + } + + object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force) + if err != nil { + if conflicts, ok := err.(merge.Conflicts); ok { + return nil, NewConflictError(conflicts) + } + return nil, err + } + + if err = EncodeObjectManagedFields(object, managed); err != nil { + return nil, fmt.Errorf("failed to encode managed fields: %v", err) + } + + return object, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go new file mode 100644 index 00000000..08186191 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// EmptyFields represents a set with no paths +// It looks like metav1.Fields{Raw: []byte("{}")} +var EmptyFields = func() metav1.FieldsV1 { + f, err := SetToFields(*fieldpath.NewSet()) + if err != nil { + panic("should never happen") + } + return f +}() + +// FieldsToSet creates a set paths from an input trie of fields +func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) { + err = s.FromJSON(bytes.NewReader(f.Raw)) + return s, err +} + +// SetToFields creates a trie of fields from an input set of paths +func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) { + f.Raw, err = s.ToJSON() + return f, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go new file mode 100644 index 00000000..b00b6b82 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" +) + +// LastAppliedConfigAnnotation is the annotation used to store the previous +// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. +// +// This is a copy of the corev1 annotation since we don't want to depend on the whole package. +const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration" + +// SetLastApplied sets the last-applied annotation the given value in +// the object. +func SetLastApplied(obj runtime.Object, value string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[LastAppliedConfigAnnotation] = value + if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil { + delete(annotations, LastAppliedConfigAnnotation) + } + accessor.SetAnnotations(annotations) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go new file mode 100644 index 00000000..3f6cf882 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go @@ -0,0 +1,171 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" +) + +type lastAppliedManager struct { + fieldManager Manager + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + groupVersion schema.GroupVersion +} + +var _ Manager = &lastAppliedManager{} + +// NewLastAppliedManager converts the client-side apply annotation to +// server-side apply managed fields +func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager { + return &lastAppliedManager{ + fieldManager: fieldManager, + typeConverter: typeConverter, + objectConverter: objectConverter, + groupVersion: groupVersion, + } +} + +// Update implements Manager. +func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply will consider the last-applied annotation +// for upgrading an object managed by client-side apply to server-side apply +// without conflicts. +func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + // Upgrade the client-side apply annotation only from kubectl server-side-apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ if manager != "kubectl" { + return newLiveObj, newManaged, newErr + } + + // Check if we have conflicts + if newErr == nil { + return newLiveObj, newManaged, newErr + } + conflicts, ok := newErr.(merge.Conflicts) + if !ok { + return newLiveObj, newManaged, newErr + } + conflictSet := conflictsToSet(conflicts) + + // Check if conflicts are allowed due to client-side apply, + // and if so, then force apply + allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj) + if err != nil { + return newLiveObj, newManaged, newErr + } + if !conflictSet.Difference(allowedConflictSet).Empty() { + newConflicts := conflictsDifference(conflicts, allowedConflictSet) + return newLiveObj, newManaged, newConflicts + } + + return f.fieldManager.Apply(liveObj, newObj, managed, manager, true) +} + +func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) { + var accessor, err = meta.Accessor(liveObj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // If there is no client-side apply annotation, then there is nothing to do + var annotations = accessor.GetAnnotations() + if annotations == nil { + return nil, fmt.Errorf("no last applied annotation") + } + var lastApplied, ok = annotations[LastAppliedConfigAnnotation] + if !ok || lastApplied == "" { + return nil, fmt.Errorf("no last applied annotation") + } + + liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + if err != nil { + return nil, fmt.Errorf("failed to convert live obj to typed: %v", err) + } + + var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}} + err = json.Unmarshal([]byte(lastApplied), lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied) + } + + if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() { + return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err) + } + + lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj) + if err != nil { + return nil, fmt.Errorf("failed to convert last applied to typed: %v", err) + } + + lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err) + } + + comparison, err := lastAppliedObjTyped.Compare(liveObjTyped) + if err != nil { + return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err) + } + + // Remove fields in last applied that are different, added, or missing in + // the live object. + // Because last-applied fields don't match the live object fields, + // then we don't own these fields. + lastAppliedObjFieldSet = lastAppliedObjFieldSet. + Difference(comparison.Modified). + Difference(comparison.Added). 
+ Difference(comparison.Removed) + + return lastAppliedObjFieldSet, nil +} + +// TODO: replace with merge.Conflicts.ToSet() +func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set { + conflictSet := fieldpath.NewSet() + for _, conflict := range []merge.Conflict(conflicts) { + conflictSet.Insert(conflict.Path) + } + return conflictSet +} + +func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts { + newConflicts := []merge.Conflict{} + for _, conflict := range []merge.Conflict(conflicts) { + if !s.Has(conflict.Path) { + newConflicts = append(newConflicts, conflict) + } + } + return newConflicts +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go new file mode 100644 index 00000000..06e6c5d8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go @@ -0,0 +1,102 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +type lastAppliedUpdater struct { + fieldManager Manager +} + +var _ Manager = &lastAppliedUpdater{} + +// NewLastAppliedUpdater sets the client-side apply annotation up to date with +// server-side apply managed fields +func NewLastAppliedUpdater(fieldManager Manager) Manager { + return &lastAppliedUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// server-side apply managed fields +func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force) + if err != nil { + return liveObj, managed, err + } + + // Sync the client-side apply annotation only from kubectl server-side apply. + // To opt-out of this behavior, users may specify a different field manager. 
+ // + // If the client-side apply annotation doesn't exist, + // then continue because we have no annotation to update + if manager == "kubectl" && hasLastApplied(liveObj) { + lastAppliedValue, err := buildLastApplied(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err) + } + err = SetLastApplied(liveObj, lastAppliedValue) + if err != nil { + return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err) + } + } + return liveObj, managed, err +} + +func hasLastApplied(obj runtime.Object) bool { + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + var annotations = accessor.GetAnnotations() + if annotations == nil { + return false + } + lastApplied, ok := annotations[LastAppliedConfigAnnotation] + return ok && len(lastApplied) > 0 +} + +func buildLastApplied(obj runtime.Object) (string, error) { + obj = obj.DeepCopyObject() + + var accessor, err = meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + // Remove the annotation from the object before encoding the object + var annotations = accessor.GetAnnotations() + delete(annotations, LastAppliedConfigAnnotation) + accessor.SetAnnotations(annotations) + + lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + if err != nil { + return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err) + } + return string(lastApplied), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go new file mode 100644 index 00000000..9b4c2032 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go @@ -0,0 +1,248 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "encoding/json" + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type ManagedInterface interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +type managedStruct struct { + fields fieldpath.ManagedFields + times map[string]*metav1.Time +} + +var _ ManagedInterface = &managedStruct{} + +// Fields implements ManagedInterface. +func (m *managedStruct) Fields() fieldpath.ManagedFields { + return m.fields +} + +// Times implements ManagedInterface. +func (m *managedStruct) Times() map[string]*metav1.Time { + return m.times +} + +// NewEmptyManaged creates an empty ManagedInterface. 
+func NewEmptyManaged() ManagedInterface { + return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{}) +} + +// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation. +func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface { + return &managedStruct{ + fields: f, + times: t, + } +} + +// RemoveObjectManagedFields removes the ManagedFields from the object +// before we merge so that it doesn't appear in the ManagedFields +// recursively. +func RemoveObjectManagedFields(obj runtime.Object) { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + accessor.SetManagedFields(nil) +} + +// EncodeObjectManagedFields converts and stores the fieldpathManagedFields into the objects ManagedFields +func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error { + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + + encodedManagedFields, err := encodeManagedFields(managed) + if err != nil { + return fmt.Errorf("failed to convert back managed fields to API: %v", err) + } + accessor.SetManagedFields(encodedManagedFields) + + return nil +} + +// DecodeManagedFields converts ManagedFields from the wire format (api format) +// to the format used by sigs.k8s.io/structured-merge-diff +func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) { + managed := managedStruct{} + managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields)) + managed.times = make(map[string]*metav1.Time, len(encodedManagedFields)) + + for i, encodedVersionedSet := range encodedManagedFields { + switch encodedVersionedSet.Operation { + case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: + default: + return nil, fmt.Errorf("operation must be `Apply` or `Update`") + } + if len(encodedVersionedSet.APIVersion) < 1 { + return nil, fmt.Errorf("apiVersion must not be empty") + } + switch encodedVersionedSet.FieldsType { + case "FieldsV1": + // Valid case. + case "": + return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i) + default: + return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i) + } + manager, err := BuildManagerIdentifier(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err) + } + managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err) + } + managed.times[manager] = encodedVersionedSet.Time + } + return &managed, nil +} + +// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry +func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) { + encodedManagerCopy := *encodedManager + + // Never include fields type in the manager identifier + encodedManagerCopy.FieldsType = "" + + // Never include the fields in the manager identifier + encodedManagerCopy.FieldsV1 = nil + + // Never include the time in the manager identifier + encodedManagerCopy.Time = nil + + // For appliers, don't include the APIVersion in the manager identifier, + // so it will always have the same manager identifier each time it applied. 
+ if encodedManager.Operation == metav1.ManagedFieldsOperationApply { + encodedManagerCopy.APIVersion = "" + } + + // Use the remaining fields to build the manager identifier + b, err := json.Marshal(&encodedManagerCopy) + if err != nil { + return "", fmt.Errorf("error marshalling manager identifier: %v", err) + } + + return string(b), nil +} + +func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) { + fields := EmptyFields + if encodedVersionedSet.FieldsV1 != nil { + fields = *encodedVersionedSet.FieldsV1 + } + set, err := FieldsToSet(fields) + if err != nil { + return nil, fmt.Errorf("error decoding set: %v", err) + } + return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil +} + +// encodeManagedFields converts ManagedFields from the format used by +// sigs.k8s.io/structured-merge-diff to the wire format (api format) +func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) { + if len(managed.Fields()) == 0 { + return nil, nil + } + encodedManagedFields = []metav1.ManagedFieldsEntry{} + for manager := range managed.Fields() { + versionedSet := managed.Fields()[manager] + v, err := encodeManagerVersionedSet(manager, versionedSet) + if err != nil { + return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err) + } + if t, ok := managed.Times()[manager]; ok { + v.Time = t + } + encodedManagedFields = append(encodedManagedFields, *v) + } + return sortEncodedManagedFields(encodedManagedFields) +} + +func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) { + sort.Slice(encodedManagedFields, func(i, j int) bool { + p, q := encodedManagedFields[i], encodedManagedFields[j] + + if p.Operation != q.Operation { + return p.Operation < q.Operation + } + + pSeconds, qSeconds := int64(0), int64(0) + if p.Time != nil { + pSeconds = p.Time.Unix() + } + if q.Time != nil { + qSeconds = q.Time.Unix() + } + if pSeconds != qSeconds { + return pSeconds < qSeconds + } + + if p.Manager != q.Manager { + return p.Manager < q.Manager + } + + if p.APIVersion != q.APIVersion { + return p.APIVersion < q.APIVersion + } + return p.Subresource < q.Subresource + }) + + return encodedManagedFields, nil +} + +func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) { + encodedVersionedSet = &metav1.ManagedFieldsEntry{} + + // Get as many fields as we can from the manager identifier + err = json.Unmarshal([]byte(manager), encodedVersionedSet) + if err != nil { + return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err) + } + + // Get the APIVersion, Operation, and Fields from the VersionedSet + encodedVersionedSet.APIVersion = string(versionedSet.APIVersion()) + if versionedSet.Applied() { + encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply + } + encodedVersionedSet.FieldsType = "FieldsV1" + fields, err := SetToFields(*versionedSet.Set()) + if err != nil { + return nil, fmt.Errorf("error encoding set: %v", err) + } + encodedVersionedSet.FieldsV1 = &fields + + return encodedVersionedSet, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go 
b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go new file mode 100644 index 00000000..376eed6b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +type managedFieldsUpdater struct { + fieldManager Manager +} + +var _ Manager = &managedFieldsUpdater{} + +// NewManagedFieldsUpdater is responsible for updating the managedfields +// in the object, updating the time of the operation as necessary. For +// updates, it uses a hard-coded manager to detect if things have +// changed, and swaps back the correct manager after the operation is +// done. +func NewManagedFieldsUpdater(fieldManager Manager) Manager { + return &managedFieldsUpdater{ + fieldManager: fieldManager, + } +} + +// Update implements Manager. +func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + self := "current-operation" + object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self) + if err != nil { + return object, managed, err + } + + // If the current operation took any fields from anything, it means the object changed, + // so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager + if vs, ok := managed.Fields()[self]; ok { + delete(managed.Fields(), self) + + if previous, ok := managed.Fields()[manager]; ok { + managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied()) + } else { + managed.Fields()[manager] = vs + } + + managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()} + } + + return object, managed, nil +} + +// Apply implements Manager. +func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) + if err != nil { + return object, managed, err + } + if object != nil { + managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()} + } else { + object = liveObj.DeepCopyObject() + RemoveObjectManagedFields(object) + } + return object, managed, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go new file mode 100644 index 00000000..05393610 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation. +type Managed interface { + // Fields gets the fieldpath.ManagedFields. + Fields() fieldpath.ManagedFields + + // Times gets the timestamps associated with each operation. + Times() map[string]*metav1.Time +} + +// Manager updates the managed fields and merges applied configurations. +type Manager interface { + // Update is used when the object has already been merged (non-apply + // use-case), and simply updates the managed fields in the output + // object. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) + + // Apply is used when server-side apply is called, as it merges the + // object and updates the managed fields. + // * `liveObj` is not mutated by this function + // * `newObj` may be mutated by this function + // Returns the new object with managedFields removed, and the object's new + // proposed managedFields separately. + Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go new file mode 100644 index 00000000..1954d65d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +const ( + // Field indicates that the content of this path element is a field's name + Field = "f" + + // Value indicates that the content of this path element is a field's value + Value = "v" + + // Index indicates that the content of this path element is an index in an array + Index = "i" + + // Key indicates that the content of this path element is a key value map + Key = "k" + + // Separator separates the type of a path element from the contents + Separator = ":" +) + +// NewPathElement parses a serialized path element +func NewPathElement(s string) (fieldpath.PathElement, error) { + split := strings.SplitN(s, Separator, 2) + if len(split) < 2 { + return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s) + } + switch split[0] { + case Field: + return fieldpath.PathElement{ + FieldName: &split[1], + }, nil + case Value: + val, err := value.FromJSON([]byte(split[1])) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Value: &val, + }, nil + case Index: + i, err := strconv.Atoi(split[1]) + if err != nil { + return fieldpath.PathElement{}, err + } + return fieldpath.PathElement{ + Index: &i, + }, nil + case Key: + kv := map[string]json.RawMessage{} + err := json.Unmarshal([]byte(split[1]), &kv) + if err != nil { + return fieldpath.PathElement{}, err + } + fields := value.FieldList{} + for k, v := range kv { + b, err := json.Marshal(v) + if err != nil { + return fieldpath.PathElement{}, err + } + val, err := value.FromJSON(b) + if err != nil { + return fieldpath.PathElement{}, err + } + + fields = append(fields, value.Field{ + Name: k, + Value: val, + }) + } + return fieldpath.PathElement{ + Key: &fields, + }, nil + default: + // Ignore unknown key types + return fieldpath.PathElement{}, nil + } +} + +// PathElementString serializes a path element +func PathElementString(pe fieldpath.PathElement) (string, error) { + switch { + case pe.FieldName != nil: + return Field + Separator + *pe.FieldName, nil + case pe.Key != nil: + kv := map[string]json.RawMessage{} + for _, k := range *pe.Key { + b, err := value.ToJSON(k.Value) + if err != nil { + return "", err + } + m := json.RawMessage{} + err = json.Unmarshal(b, &m) + if err != nil { + return "", err + } + kv[k.Name] = m + } + b, err := json.Marshal(kv) + if err != nil { + return "", err + } + return Key + ":" + string(b), nil + case pe.Value != nil: + b, err := value.ToJSON(*pe.Value) + if err != nil { + return "", err + } + return Value + ":" + string(b), nil + case pe.Index != nil: + return Index + ":" + strconv.Itoa(*pe.Index), nil + default: + return "", errors.New("Invalid type of path element") + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go new file mode 100644 index 00000000..f24c040e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go @@ -0,0 +1,89 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + "math/rand" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +type skipNonAppliedManager struct { + fieldManager Manager + objectCreater runtime.ObjectCreater + beforeApplyManagerName string + probability float32 +} + +var _ Manager = &skipNonAppliedManager{} + +// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply. +func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater) Manager { + return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, 0.0) +} + +// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply, +// or starts tracking on create with p probability. +func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, p float32) Manager { + return &skipNonAppliedManager{ + fieldManager: fieldManager, + objectCreater: objectCreater, + beforeApplyManagerName: "before-first-apply", + probability: p, + } +} + +// Update implements Manager. +func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + accessor, err := meta.Accessor(liveObj) + if err != nil { + return newObj, managed, nil + } + + // If managed fields is empty, we need to determine whether to skip tracking managed fields. + if len(managed.Fields()) == 0 { + // Check if the operation is a create, by checking whether lastObj's UID is empty. + // If the operation is create, P(tracking managed fields) = f.probability + // If the operation is update, skip tracking managed fields, since we already know managed fields is empty. + if len(accessor.GetUID()) == 0 { + if f.probability <= rand.Float32() { + return newObj, managed, nil + } + } else { + return newObj, managed, nil + } + } + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. +func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if len(managed.Fields()) == 0 { + gvk := appliedObj.GetObjectKind().GroupVersionKind() + emptyObj, err := f.objectCreater.New(gvk) + if err != nil { + return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err) + } + liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName) + if err != nil { + return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err) + } + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go new file mode 100644 index 00000000..9b61f3a6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+)
+
+type stripMetaManager struct {
+ fieldManager Manager
+
+ // stripSet is the list of fields that should never be part of a managedFields.
+ stripSet *fieldpath.Set
+}
+
+var _ Manager = &stripMetaManager{}
+
+// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset.
+func NewStripMetaManager(fieldManager Manager) Manager {
+ return &stripMetaManager{
+ fieldManager: fieldManager,
+ stripSet: fieldpath.NewSet(
+ fieldpath.MakePathOrDie("apiVersion"),
+ fieldpath.MakePathOrDie("kind"),
+ fieldpath.MakePathOrDie("metadata"),
+ fieldpath.MakePathOrDie("metadata", "name"),
+ fieldpath.MakePathOrDie("metadata", "namespace"),
+ fieldpath.MakePathOrDie("metadata", "creationTimestamp"),
+ fieldpath.MakePathOrDie("metadata", "selfLink"),
+ fieldpath.MakePathOrDie("metadata", "uid"),
+ fieldpath.MakePathOrDie("metadata", "clusterName"),
+ fieldpath.MakePathOrDie("metadata", "generation"),
+ fieldpath.MakePathOrDie("metadata", "managedFields"),
+ fieldpath.MakePathOrDie("metadata", "resourceVersion"),
+ ),
+ }
+}
+
+// Update implements Manager.
+func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
+ newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager)
+ if err != nil {
+ return nil, nil, err
+ }
+ f.stripFields(managed.Fields(), manager)
+ return newObj, managed, nil
+}
+
+// Apply implements Manager.
+func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
+ newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
+ if err != nil {
+ return nil, nil, err
+ }
+ f.stripFields(managed.Fields(), manager)
+ return newObj, managed, nil
+}
+
+// stripFields removes a predefined set of paths found in typed from managed
+func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) {
+ vs, ok := managed[manager]
+ if ok {
+ if vs == nil {
+ panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager))
+ }
+ newSet := vs.Set().Difference(f.stripSet)
+ if newSet.Empty() {
+ delete(managed, manager)
+ } else {
+ managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied())
+ }
+ }
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
new file mode 100644
index 00000000..786ad991
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +type structuredMergeManager struct { + typeConverter TypeConverter + objectConverter runtime.ObjectConvertor + objectDefaulter runtime.ObjectDefaulter + groupVersion schema.GroupVersion + hubVersion schema.GroupVersion + updater merge.Updater +} + +var _ Manager = &structuredMergeManager{} + +// NewStructuredMergeManager creates a new Manager that merges apply requests +// and update managed fields for other types of requests. +func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) { + if typeConverter == nil { + return nil, fmt.Errorf("typeconverter must not be nil") + } + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s + IgnoredFields: resetFields, + }, + }, nil +} + +// NewCRDStructuredMergeManager creates a new Manager specifically for +// CRDs. This allows for the possibility of fields which are not defined +// in models, as well as having no models defined at all. +func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) { + return &structuredMergeManager{ + typeConverter: typeConverter, + objectConverter: objectConverter, + objectDefaulter: objectDefaulter, + groupVersion: gv, + hubVersion: hub, + updater: merge.Updater{ + Converter: newCRDVersionConverter(typeConverter, objectConverter, hub), + IgnoredFields: resetFields, + }, + }, nil +} + +func objectGVKNN(obj runtime.Object) string { + name := "" + namespace := "" + if accessor, err := meta.Accessor(obj); err == nil { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + + return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind()) +} + +// Update implements Manager. 
+func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err) + } + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned, typed.AllowDuplicates) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) + } + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) + } + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + + // TODO(apelisse) use the first return value when unions are implemented + _, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager) + if err != nil { + return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err) + } + managed = NewManaged(managedFields, managed.Times()) + + return newObj, managed, nil +} + +// Apply implements Manager. +func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) { + // Check that the patch object has the same version as the live object + if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion { + return nil, nil, + errors.NewBadRequest( + fmt.Sprintf("Incorrect version specified in apply patch. "+ + "Specified patch version: %s, expected: %s", + patchVersion, f.groupVersion)) + } + + patchObjMeta, err := meta.Accessor(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("couldn't get accessor: %v", err) + } + if patchObjMeta.GetManagedFields() != nil { + return nil, nil, errors.NewBadRequest("metadata.managedFields must be nil") + } + + liveObjVersioned, err := f.toVersioned(liveObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) + } + + // Don't allow duplicates in the applied object. 
+ patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) + } + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) + if err != nil { + return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) + } + + apiVersion := fieldpath.APIVersion(f.groupVersion.String()) + newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force) + if err != nil { + return nil, nil, err + } + managed = NewManaged(managedFields, managed.Times()) + + if newObjTyped == nil { + return nil, managed, nil + } + + newObj, err := f.typeConverter.TypedToObject(newObjTyped) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err) + } + + newObjVersioned, err := f.toVersioned(newObj) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version: %v", objectGVKNN(patchObj), err) + } + f.objectDefaulter.Default(newObjVersioned) + + newObjUnversioned, err := f.toUnversioned(newObjVersioned) + if err != nil { + return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err) + } + return newObjUnversioned, managed, nil +} + +func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.groupVersion) +} + +func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) { + return f.objectConverter.ConvertToVersion(obj, f.hubVersion) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go new file mode 100644 index 00000000..c6449467 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/schemaconv" + "k8s.io/kube-openapi/pkg/validation/spec" + smdschema "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. 
+type TypeConverter interface { + ObjectToTyped(runtime.Object, ...typed.ValidationOptions) (*typed.TypedValue, error) + TypedToObject(*typed.TypedValue) (runtime.Object, error) +} + +type typeConverter struct { + parser map[schema.GroupVersionKind]*typed.ParseableType +} + +var _ TypeConverter = &typeConverter{} + +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + typeSchema, err := schemaconv.ToSchemaFromOpenAPI(openapiSpec, preserveUnknownFields) + if err != nil { + return nil, fmt.Errorf("failed to convert models to schema: %v", err) + } + + typeParser := typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}} + tr := indexModels(&typeParser, openapiSpec) + + return &typeConverter{parser: tr}, nil +} + +func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + t := c.parser[gvk] + if t == nil { + return nil, NewNoCorrespondingTypeError(gvk) + } + switch o := obj.(type) { + case *unstructured.Unstructured: + return t.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return t.FromStructured(obj, opts...) + } +} + +func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +type deducedTypeConverter struct{} + +// DeducedTypeConverter is a TypeConverter for CRDs that don't have a +// schema. It does implement the same interface though (and create the +// same types of objects), so that everything can still work the same. +// CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return deducedTypeConverter{} +} + +// ObjectToTyped converts an object into a TypedValue with a "deduced type". +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { + switch o := obj.(type) { + case *unstructured.Unstructured: + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent(), opts...) + default: + return typed.DeducedParseableType.FromStructured(obj, opts...) + } +} + +// TypedToObject transforms the typed value into a runtime.Object. That +// is not specific to deduced type. +func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) { + return valueToObject(value.AsValue()) +} + +func valueToObject(val value.Value) (runtime.Object, error) { + vu := val.Unstructured() + switch o := vu.(type) { + case map[string]interface{}: + return &unstructured.Unstructured{Object: o}, nil + default: + return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu) + } +} + +func indexModels( + typeParser *typed.Parser, + openAPISchemas map[string]*spec.Schema, +) map[schema.GroupVersionKind]*typed.ParseableType { + tr := map[schema.GroupVersionKind]*typed.ParseableType{} + for modelName, model := range openAPISchemas { + gvkList := parseGroupVersionKind(model.Extensions) + if len(gvkList) == 0 { + continue + } + + parsedType := typeParser.Type(modelName) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + tr[schema.GroupVersionKind(gvk)] = &parsedType + } + } + } + return tr +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. 
+func parseGroupVersionKind(extensions map[string]interface{}) []schema.GroupVersionKind { + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions["x-kubernetes-group-version-kind"] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + var group, version, kind string + + // gvk extension list must be a map with group, version, and + // kind fields + if gvkMap, ok := gvk.(map[interface{}]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + + } else if gvkMap, ok := gvk.(map[string]interface{}); ok { + group, ok = gvkMap["group"].(string) + if !ok { + continue + } + version, ok = gvkMap["version"].(string) + if !ok { + continue + } + kind, ok = gvkMap["kind"].(string) + if !ok { + continue + } + } else { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go new file mode 100644 index 00000000..ee1e2bca --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type versionCheckManager struct { + fieldManager Manager + gvk schema.GroupVersionKind +} + +var _ Manager = &versionCheckManager{} + +// NewVersionCheckManager creates a manager that makes sure that the +// applied object is in the proper version. +func NewVersionCheckManager(fieldManager Manager, gvk schema.GroupVersionKind) Manager { + return &versionCheckManager{fieldManager: fieldManager, gvk: gvk} +} + +// Update implements Manager. +func (f *versionCheckManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) { + // Nothing to do for updates, this is checked in many other places. + return f.fieldManager.Update(liveObj, newObj, managed, manager) +} + +// Apply implements Manager. 
+func (f *versionCheckManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) { + if gvk := appliedObj.GetObjectKind().GroupVersionKind(); gvk != f.gvk { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid object type: %v", gvk)) + } + return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go new file mode 100644 index 00000000..45855fa4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go @@ -0,0 +1,123 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +// versionConverter is an implementation of +// sigs.k8s.io/structured-merge-diff/merge.Converter +type versionConverter struct { + typeConverter TypeConverter + objectConvertor runtime.ObjectConvertor + hubGetter func(from schema.GroupVersion) schema.GroupVersion +} + +var _ merge.Converter = &versionConverter{} + +// NewVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor. +func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return schema.GroupVersion{ + Group: from.Group, + Version: h.Version, + } + }, + } +} + +// NewCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor. +func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter { + return &versionConverter{ + typeConverter: t, + objectConvertor: o, + hubGetter: func(from schema.GroupVersion) schema.GroupVersion { + return h + }, + } +} + +// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter +func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) { + // Convert the smd typed value to a kubernetes object. + objectToConvert, err := v.typeConverter.TypedToObject(object) + if err != nil { + return object, err + } + + // Parse the target groupVersion. + groupVersion, err := schema.ParseGroupVersion(string(version)) + if err != nil { + return object, err + } + + // If attempting to convert to the same version as we already have, just return it. 
+ fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion() + if fromVersion == groupVersion { + return object, nil + } + + // Convert to internal + internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion)) + if err != nil { + return object, err + } + + // Convert the object into the target version + convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion) + if err != nil { + return object, err + } + + // Convert the object back to a smd typed value and return it. + return v.typeConverter.ObjectToTyped(convertedObject) +} + +// IsMissingVersionError +func (v *versionConverter) IsMissingVersionError(err error) bool { + return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err) +} + +type noCorrespondingTypeErr struct { + gvk schema.GroupVersionKind +} + +func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error { + return &noCorrespondingTypeErr{gvk: gvk} +} + +func (k *noCorrespondingTypeErr) Error() string { + return fmt.Sprintf("no corresponding type for %v", k.gvk) +} + +func isNoCorrespondingTypeError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*noCorrespondingTypeErr) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml new file mode 100644 index 00000000..a7f2d54f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml @@ -0,0 +1,261 @@ +apiVersion: v1 +kind: Node +metadata: + annotations: + container.googleapis.com/instance_id: "123456789321654789" + node.alpha.kubernetes.io/ttl: "0" + volumes.kubernetes.io/controller-managed-attach-detach: "true" + creationTimestamp: "2019-07-09T16:17:29Z" + labels: + kubernetes.io/arch: amd64 + beta.kubernetes.io/fluentd-ds-ready: "true" + beta.kubernetes.io/instance-type: n1-standard-4 + kubernetes.io/os: linux + cloud.google.com/gke-nodepool: default-pool + cloud.google.com/gke-os-distribution: cos + failure-domain.beta.kubernetes.io/region: us-central1 + failure-domain.beta.kubernetes.io/zone: us-central1-b + topology.kubernetes.io/region: us-central1 + topology.kubernetes.io/zone: us-central1-b + kubernetes.io/hostname: node-default-pool-something + name: node-default-pool-something + resourceVersion: "211582541" + selfLink: /api/v1/nodes/node-default-pool-something + uid: 0c24d0e1-a265-11e9-abe4-42010a80026b +spec: + podCIDR: 10.0.0.1/24 + providerID: some-provider-id-of-some-sort +status: + addresses: + - address: 10.0.0.1 + type: InternalIP + - address: 192.168.0.1 + type: ExternalIP + - address: node-default-pool-something + type: Hostname + allocatable: + cpu: 3920m + ephemeral-storage: "104638878617" + hugepages-2Mi: "0" + memory: 12700100Ki + pods: "110" + capacity: + cpu: "4" + ephemeral-storage: 202086868Ki + hugepages-2Mi: "0" + memory: 15399364Ki + pods: "110" + conditions: + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:08Z" + message: containerd is functioning properly + reason: FrequentContainerdRestart + status: "False" + type: FrequentContainerdRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker overlay2 is functioning properly + reason: CorruptDockerOverlay2 + status: "False" + type: CorruptDockerOverlay2 + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: node is functioning properly + reason: 
UnregisterNetDevice + status: "False" + type: FrequentUnregisterNetDevice + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: kernel has no deadlock + reason: KernelHasNoDeadlock + status: "False" + type: KernelDeadlock + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:17:04Z" + message: Filesystem is not read-only + reason: FilesystemIsNotReadOnly + status: "False" + type: ReadonlyFilesystem + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:05Z" + message: kubelet is functioning properly + reason: FrequentKubeletRestart + status: "False" + type: FrequentKubeletRestart + - lastHeartbeatTime: "2019-09-20T19:32:08Z" + lastTransitionTime: "2019-07-09T16:22:06Z" + message: docker is functioning properly + reason: FrequentDockerRestart + status: "False" + type: FrequentDockerRestart + - lastHeartbeatTime: "2019-07-09T16:17:47Z" + lastTransitionTime: "2019-07-09T16:17:47Z" + message: RouteController created a route + reason: RouteCreated + status: "False" + type: NetworkUnavailable + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient disk space available + reason: KubeletHasSufficientDisk + status: "False" + type: OutOfDisk + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient memory available + reason: KubeletHasSufficientMemory + status: "False" + type: MemoryPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has no disk pressure + reason: KubeletHasNoDiskPressure + status: "False" + type: DiskPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:29Z" + message: kubelet has sufficient PID available + reason: KubeletHasSufficientPID + status: "False" + type: PIDPressure + - lastHeartbeatTime: "2019-09-20T19:32:50Z" + lastTransitionTime: "2019-07-09T16:17:49Z" + message: kubelet is posting ready status + reason: KubeletReady + status: "True" + type: Ready + daemonEndpoints: + kubeletEndpoint: + Port: 10250 + images: + - names: + - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8 + - grafana/grafana:4.4.2 + sizeBytes: 287008013 + - names: + - registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b + - registry.k8s.io/node-problem-detector:v0.4.1 + sizeBytes: 286572743 + - names: + - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033 + - grafana/grafana:4.2.0 + sizeBytes: 277940263 + - names: + - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc + - influxdb:1.2.2 + sizeBytes: 223948571 + - names: + - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2 + - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1 + sizeBytes: 223242132 + - names: + - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b + - nginx:1.10.1 + sizeBytes: 180708613 + - names: + - registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73 + - registry.k8s.io/fluentd-elasticsearch:v2.0.4 + sizeBytes: 135716379 + - names: + - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f + - nginx:latest + sizeBytes: 109357355 + - 
names: + - registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0 + - registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3 + sizeBytes: 102319441 + - names: + - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5 + - registry.k8s.io/kube-proxy:v1.11.10-gke.5 + sizeBytes: 102279340 + - names: + - registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516 + - registry.k8s.io/event-exporter:v0.2.3 + sizeBytes: 94171943 + - names: + - registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662 + - registry.k8s.io/prometheus-to-sd:v0.3.1 + sizeBytes: 88077694 + - names: + - registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff + - registry.k8s.io/fluentd-gcp-scaler:0.5.1 + sizeBytes: 86637208 + - names: + - registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521 + - registry.k8s.io/heapster-amd64:v1.6.0-beta.1 + sizeBytes: 76016169 + - names: + - registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52 + - registry.k8s.io/ingress-glbc-amd64:v1.1.1 + sizeBytes: 67801919 + - names: + - registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09 + - registry.k8s.io/kube-addon-manager:v8.7 + sizeBytes: 63322109 + - names: + - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315 + - nginx:1.10-alpine + sizeBytes: 54042627 + - names: + - registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869 + - registry.k8s.io/cpvpa-amd64:v0.6.0 + sizeBytes: 51785854 + - names: + - registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d + - registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2 + sizeBytes: 49648481 + - names: + - registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103 + - registry.k8s.io/ip-masq-agent-amd64:v2.1.1 + sizeBytes: 49612505 + - names: + - registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8 + - registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10 + sizeBytes: 49549457 + - names: + - registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450 + - registry.k8s.io/rescheduler:v0.4.0 + sizeBytes: 48973149 + - names: + - registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc + - registry.k8s.io/event-exporter:v0.2.4 + sizeBytes: 47261019 + - names: + - registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848 + - registry.k8s.io/coredns:1.1.3 + sizeBytes: 45587362 + - names: + - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74 + - prom/prometheus:v1.1.3 + sizeBytes: 45493941 + nodeInfo: + architecture: amd64 + bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61 + containerRuntimeVersion: docker://17.3.2 + kernelVersion: 4.14.127+ + kubeProxyVersion: v1.11.10-gke.5 + kubeletVersion: v1.11.10-gke.5 + machineID: 1739555e5b231057f0f9a0b5fa29511b + operatingSystem: linux + osImage: Container-Optimized OS from Google + systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B + volumesAttached: + - devicePath: 
/dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + volumesInUse: + - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049 + - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049 diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml new file mode 100644 index 00000000..3fb0877d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml @@ -0,0 +1,121 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: some-app + plugin1: some-value + plugin2: some-value + plugin3: some-value + plugin4: some-value + name: some-name + namespace: default + ownerReferences: + - apiVersion: apps/v1 + blockOwnerDeletion: true + controller: true + kind: ReplicaSet + name: some-name + uid: 0a9d2b9e-779e-11e7-b422-42010a8001be +spec: + containers: + - args: + - one + - two + - three + - four + - five + - six + - seven + - eight + - nine + env: + - name: VAR_3 + valueFrom: + secretKeyRef: + key: some-other-key + name: some-oher-name + - name: VAR_2 + valueFrom: + secretKeyRef: + key: other-key + name: other-name + - name: VAR_1 + valueFrom: + secretKeyRef: + key: some-key + name: some-name + image: some-image-name + imagePullPolicy: IfNotPresent + name: some-name + resources: + requests: + cpu: '0' + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/secrets/kubernetes.io/serviceaccount + name: default-token-hu5jz + readOnly: true + dnsPolicy: ClusterFirst + nodeName: node-name + priority: 0 + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: default + serviceAccountName: default + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 300 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 300 + volumes: + - name: default-token-hu5jz + secret: + defaultMode: 420 + secretName: default-token-hu5jz +status: + conditions: + - lastProbeTime: null + 
lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: Initialized + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:41:59Z' + status: 'True' + type: Ready + - lastProbeTime: null + lastTransitionTime: null + status: 'True' + type: ContainersReady + - lastProbeTime: null + lastTransitionTime: '2019-07-08T09:31:18Z' + status: 'True' + type: PodScheduled + containerStatuses: + - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376 + image: some-image-name + imageID: docker-pullable://some-image-id + lastState: + terminated: + containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565 + exitCode: 255 + finishedAt: '2019-07-08T09:39:09Z' + reason: Error + startedAt: '2019-07-08T09:38:54Z' + name: name + ready: true + restartCount: 6 + state: + running: + startedAt: '2019-07-08T09:41:59Z' + hostIP: 10.0.0.1 + phase: Running + podIP: 10.0.0.1 + qosClass: BestEffort + startTime: '2019-07-08T09:31:18Z' diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go new file mode 100644 index 00000000..48b774ce --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go @@ -0,0 +1,174 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" +) + +var ( + scaleGroupVersion = schema.GroupVersion{Group: "autoscaling", Version: "v1"} + replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas") +) + +// ResourcePathMappings maps a group/version to its replicas path. The +// assumption is that all the paths correspond to leaf fields. +type ResourcePathMappings map[string]fieldpath.Path + +// ScaleHandler manages the conversion of managed fields between a main +// resource and the scale subresource +type ScaleHandler struct { + parentEntries []metav1.ManagedFieldsEntry + groupVersion schema.GroupVersion + mappings ResourcePathMappings +} + +// NewScaleHandler creates a new ScaleHandler +func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler { + return &ScaleHandler{ + parentEntries: parentEntries, + groupVersion: groupVersion, + mappings: mappings, + } +} + +// ToSubresource filter the managed fields of the main resource and convert +// them so that they can be handled by scale. +// For the managed fields that have a replicas path it performs two changes: +// 1. APIVersion is changed to the APIVersion of the scale subresource +// 2. 
Replicas path of the main resource is transformed to the replicas path of +// the scale subresource +func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) { + managed, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + for manager, versionedSet := range managed.Fields() { + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Skip the entry if the APIVersion is unknown + if !ok || path == nil { + continue + } + + if versionedSet.Set().Has(path) { + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(replicasPathInScale), + fieldpath.APIVersion(scaleGroupVersion.String()), + versionedSet.Applied(), + ) + + f[manager] = newVersionedSet + t[manager] = managed.Times()[manager] + } + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +// ToParent merges `scaleEntries` with the entries of the main resource and +// transforms them accordingly +func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) { + decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries) + if err != nil { + return nil, err + } + parentFields := decodedParentEntries.Fields() + + decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries) + if err != nil { + return nil, err + } + scaleFields := decodedScaleEntries.Fields() + + f := fieldpath.ManagedFields{} + t := map[string]*metav1.Time{} + + for manager, versionedSet := range parentFields { + // Get the main resource "replicas" path + path, ok := h.mappings[string(versionedSet.APIVersion())] + // Drop the entry if the APIVersion is unknown. + if !ok { + continue + } + + // If the parent entry does not have the replicas path or it is nil, just + // keep it as it is. The path is nil for Custom Resources without scale + // subresource. + if path == nil || !versionedSet.Set().Has(path) { + f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + continue + } + + if _, ok := scaleFields[manager]; !ok { + // "Steal" the replicas path from the main resource entry + newSet := versionedSet.Set().Difference(fieldpath.NewSet(path)) + + if !newSet.Empty() { + newVersionedSet := fieldpath.NewVersionedSet( + newSet, + versionedSet.APIVersion(), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + } else { + // Field wasn't stolen, let's keep the entry as it is. 
+ f[manager] = versionedSet + t[manager] = decodedParentEntries.Times()[manager] + delete(scaleFields, manager) + } + } + + for manager, versionedSet := range scaleFields { + if !versionedSet.Set().Has(replicasPathInScale) { + continue + } + newVersionedSet := fieldpath.NewVersionedSet( + fieldpath.NewSet(h.mappings[h.groupVersion.String()]), + fieldpath.APIVersion(h.groupVersion.String()), + versionedSet.Applied(), + ) + f[manager] = newVersionedSet + t[manager] = decodedParentEntries.Times()[manager] + } + + return managedFieldsEntries(internal.NewManaged(f, t)) +} + +func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) { + obj := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := internal.EncodeObjectManagedFields(obj, entries); err != nil { + return nil, err + } + accessor, err := meta.Accessor(obj) + if err != nil { + panic(fmt.Sprintf("couldn't get accessor: %v", err)) + } + return accessor.GetManagedFields(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go new file mode 100644 index 00000000..d031eefa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managedfields + +import ( + "k8s.io/apimachinery/pkg/util/managedfields/internal" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// TypeConverter allows you to convert from runtime.Object to +// typed.TypedValue and the other way around. +type TypeConverter = internal.TypeConverter + +// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't +// have a schema. It does implement the same interface though (and +// create the same types of objects), so that everything can still work +// the same. CRDs are merged with all their fields being "atomic" (lists +// included). +func NewDeducedTypeConverter() TypeConverter { + return internal.NewDeducedTypeConverter() +} + +// NewTypeConverter builds a TypeConverter from a map of OpenAPIV3 schemas. +// This will automatically find the proper version of the object, and the +// corresponding schema information. +// The keys to the map must be consistent with the names +// used by Refs within the schemas. 
+// The schemas should conform to the Kubernetes Structural Schema OpenAPI +// restrictions found in docs: +// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema +func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) { + return internal.NewTypeConverter(openapiSpec, preserveUnknownFields) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go index e3962756..25626cf3 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/davecgh/go-spew/spew" + "k8s.io/apimachinery/pkg/util/dump" "sigs.k8s.io/yaml" ) @@ -76,7 +76,7 @@ func ToYAMLOrError(v interface{}) string { func toYAML(v interface{}) (string, error) { y, err := yaml.Marshal(v) if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, dump.Pretty(v)) } return string(y), nil @@ -88,8 +88,7 @@ func toYAML(v interface{}) (string, error) { // supports JSON merge patch semantics. // // NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). func HasConflicts(left, right interface{}) (bool, error) { switch typedLeft := left.(type) { case map[string]interface{}: diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 1c2aba55..1635e69a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -20,6 +20,7 @@ import ( "errors" "net" "reflect" + "strings" "syscall" ) @@ -47,6 +48,11 @@ func IsConnectionReset(err error) bool { return false } +// Returns if the given err is "http2: client connection lost" error. +func IsHTTP2ConnectionLost(err error) bool { + return err != nil && strings.Contains(err.Error(), "http2: client connection lost") +} + // Returns if the given err is "connection refused" error func IsConnectionRefused(err error) bool { var errno syscall.Errno diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index d738725c..4fe0c5eb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "fmt" "net/http" "runtime" @@ -35,7 +36,7 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} +var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via // defer. Additional context-specific handlers can be provided, and will be @@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic} // handlers and logging the panic message. // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. 
+// +// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. - panic(r) + additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) + for i, handler := range additionalHandlers { + handler := handler // capture loop variable + additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) { + handler(r) + } } + + handleCrash(context.Background(), r, additionalHandlersWithContext...) + } +} + +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// The context is used to determine how to log. +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + handleCrash(ctx, r, additionalHandlers...) + } +} + +// handleCrash is the common implementation of HandleCrash and HandleCrash. +// Having those call a common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + for _, fn := range PanicHandlers { + fn(ctx, r) + } + for _, fn := range additionalHandlers { + fn(ctx, r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) } } // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { +func logPanic(ctx context.Context, r interface{}) { if r == http.ErrAbortHandler { // honor the http.ErrAbortHandler sentinel panic value: // ErrAbortHandler is a sentinel panic value to abort a handler. @@ -73,10 +105,20 @@ func logPanic(r interface{}) { const size = 64 << 10 stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithontext(...)`. + logger := klog.FromContext(ctx).WithCallDepth(4) + + // For backwards compatibility, conversion to string + // is handled here instead of defering to the logging + // backend. if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace)) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace)) } } @@ -84,35 +126,76 @@ func logPanic(r interface{}) { // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. 
-var ErrorHandlers = []func(error){ +var ErrorHandlers = []ErrorHandler{ logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. :) - minPeriod: time.Millisecond, - }).OnError, + func(_ context.Context, _ error, _ string, _ ...interface{}) { + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError() + }, } +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) + // HandlerError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. Invoking this method // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. +// +// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead if err == nil { return } + handleError(context.Background(), err, "Unhandled Error") +} + +// HandlerErrorWithContext is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. The context is used to +// determine how to log the error. +// +// If contextual logging is enabled, the default log output is equivalent to +// +// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...) +// +// Without contextual logging, it is equivalent to: +// +// klog.ErrorS(err, msg, keysAndValues...) +// +// In contrast to HandleError, passing nil for the error is still going to +// trigger a log entry. Don't construct a new error or wrap an error +// with fmt.Errorf. Instead, add additional information via the mssage +// and key/value pairs. +// +// This variant should be used instead of HandleError because it supports +// structured, contextual logging. +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + handleError(ctx, err, msg, keysAndValues...) +} + +// handleError is the common implementation of HandleError and HandleErrorWithContext. +// Using this common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { for _, fn := range ErrorHandlers { - fn(err) + fn(ctx, err, msg, keysAndValues...) } } -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - klog.ErrorDepth(2, err) +// logError prints an error with the call stack of the location it was reported. +// It expects to be called as -> HandleError[WithContext] -> handleError -> logError. 
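For orientation, a small usage sketch of the context-aware variants added in this file; the mightFail helper and the log message are illustrative assumptions, only the HandleCrashWithContext and HandleErrorWithContext calls come from the change above.

package main

import (
	"context"
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func mightFail() error { return errors.New("boom") }

func doWork(ctx context.Context) {
	// Panics are recovered and logged through the logger carried in ctx
	// (klog.FromContext falls back to the global logger when none is attached).
	defer utilruntime.HandleCrashWithContext(ctx)

	if err := mightFail(); err != nil {
		// Unlike HandleError, the message and optional key/value pairs are part of
		// the call and end up in a structured "UnhandledError" log entry.
		utilruntime.HandleErrorWithContext(ctx, err, "failed to refresh cache", "attempt", 1)
	}
}

func main() {
	doWork(context.Background())
}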
+func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + logger := klog.FromContext(ctx).WithCallDepth(3) + logger = klog.LoggerWithName(logger, "UnhandledError") + logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs. } type rudimentaryErrorBackoff struct { @@ -125,15 +208,18 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. -func (r *rudimentaryErrorBackoff) OnError(error) { +func (r *rudimentaryErrorBackoff) OnError() { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go index 19488339..fd281bdb 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package sets has generic set and specified sets. Generic set will // replace specified ones over time. And specific ones are deprecated. -package sets +package sets // import "k8s.io/apimachinery/pkg/util/sets" diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go b/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go deleted file mode 100644 index 443dac62..00000000 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/ordered.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sets - -// ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type ordered interface { - integer | float | ~string -} - -// integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type integer interface { - signed | unsigned -} - -// float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. 
-type float interface { - ~float32 | ~float64 -} - -// signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go index 99c292fe..cd961c8c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/set.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/set.go @@ -17,6 +17,7 @@ limitations under the License. package sets import ( + "cmp" "sort" ) @@ -37,7 +38,7 @@ func New[T comparable](items ...T) Set[T] { // KeySet creates a Set from a keys of a map[comparable](? extends interface{}). // If the value passed in is not actually a map, this will panic. func KeySet[T comparable, V any](theMap map[T]V) Set[T] { - ret := Set[T]{} + ret := make(Set[T], len(theMap)) for keyValue := range theMap { ret.Insert(keyValue) } @@ -64,6 +65,14 @@ func (s Set[T]) Delete(items ...T) Set[T] { return s } +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). +func (s Set[T]) Clear() Set[T] { + clear(s) + return s +} + // Has returns true if and only if item is contained in the set. func (s Set[T]) Has(item T) bool { _, contained := s[item] @@ -179,7 +188,7 @@ func (s1 Set[T]) Equal(s2 Set[T]) bool { return len(s1) == len(s2) && s1.IsSuperset(s2) } -type sortableSliceOfGeneric[T ordered] []T +type sortableSliceOfGeneric[T cmp.Ordered] []T func (g sortableSliceOfGeneric[T]) Len() int { return len(g) } func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) } @@ -189,7 +198,7 @@ func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] // // This is a separate function and not a method because not all types supported // by Generic are ordered and only those can be sorted. 
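Since the package-local ordered constraint above is removed in favour of the standard library's cmp.Ordered, a minimal sketch of the resulting sets API, including the new Clear method; the string elements are arbitrary.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	s := sets.New[string]("b", "a", "c")

	// List is now constrained by cmp.Ordered rather than the deleted ordered
	// interface, but callers are unaffected: it still returns a sorted slice.
	fmt.Println(sets.List(s)) // [a b c]

	// Clear empties the set in place, which matters when other code still holds
	// a reference to the same underlying map.
	s.Clear()
	fmt.Println(s.Len()) // 0
}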
-func List[T ordered](s Set[T]) []T { +func List[T cmp.Ordered](s Set[T]) []T { res := make(sortableSliceOfGeneric[T], 0, len(s)) for key := range s { res = append(res, key) @@ -222,6 +231,6 @@ func (s Set[T]) Len() int { return len(s) } -func less[T ordered](lhs, rhs T) bool { +func less[T cmp.Ordered](lhs, rhs T) bool { return lhs < rhs } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS index 4443bafd..73244449 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -1,6 +1,7 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: + - apelisse - pwittrock reviewers: - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index df305b71..85b0cfc0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -20,12 +20,17 @@ import ( "errors" "fmt" "reflect" + "strings" "k8s.io/apimachinery/pkg/util/mergepatch" forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" openapi "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" ) +const patchMergeKey = "x-kubernetes-patch-merge-key" +const patchStrategy = "x-kubernetes-patch-strategy" + type PatchMeta struct { patchStrategies []string patchMergeKey string @@ -148,6 +153,90 @@ func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { return t } +type PatchMetaFromOpenAPIV3 struct { + // SchemaList is required to resolve OpenAPI V3 references + SchemaList map[string]*spec.Schema + Schema *spec.Schema +} + +func (s PatchMetaFromOpenAPIV3) traverse(key string) (PatchMetaFromOpenAPIV3, error) { + if s.Schema == nil { + return PatchMetaFromOpenAPIV3{}, nil + } + if len(s.Schema.Properties) == 0 { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + subschema, ok := s.Schema.Properties[key] + if !ok { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + return PatchMetaFromOpenAPIV3{SchemaList: s.SchemaList, Schema: &subschema}, nil +} + +func resolve(l *PatchMetaFromOpenAPIV3) error { + if len(l.Schema.AllOf) > 0 { + l.Schema = &l.Schema.AllOf[0] + } + if refString := l.Schema.Ref.String(); refString != "" { + str := strings.TrimPrefix(refString, "#/components/schemas/") + sch, ok := l.SchemaList[str] + if ok { + l.Schema = sch + } else { + return fmt.Errorf("unable to resolve %s in OpenAPI V3", refString) + } + } + return nil +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } 
+ if l.Schema.Items != nil { + l.Schema = l.Schema.Items.Schema + } + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) Name() string { + schema := s.Schema + if len(schema.Type) > 0 { + return strings.Join(schema.Type, "") + } + return "Struct" +} + type PatchMetaFromOpenAPI struct { Schema openapi.Schema } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 6fb36973..6825a808 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -1106,7 +1106,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // Then, sort them by the relative order in setElementOrder, patch list and live list. // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. -// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +// Ref: https://git.k8s.io/design-proposals-archive/cli/preserve-order-in-strategic-merge-patch.md func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive @@ -1182,7 +1182,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, schema Looku merged = originalFieldValue case !foundOriginal && foundPatch: // list was added - merged = patchFieldValue + v, keep := removeDirectives(patchFieldValue) + if !keep { + // Shouldn't be possible since patchFieldValue is a slice + continue + } + + merged = v.([]interface{}) case foundOriginal && foundPatch: merged, err = mergeSliceHandler(originalList, patchList, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) @@ -1270,6 +1276,42 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey return patch, serverOnly, nil } +// Removes directives from an object and returns value to use instead and whether +// or not the field/index should even be kept +// May modify input +func removeDirectives(obj interface{}) (interface{}, bool) { + if obj == nil { + return obj, true + } else if typedV, ok := obj.(map[string]interface{}); ok { + if _, hasDirective := typedV[directiveMarker]; hasDirective { + return nil, false + } + + for k, v := range typedV { + var keep bool + typedV[k], keep = removeDirectives(v) + if !keep { + delete(typedV, k) + } + } + return typedV, true + } else if typedV, ok := obj.([]interface{}); ok { + var res []interface{} + if typedV != nil { + // Make sure res is non-nil if patch is non-nil + res = []interface{}{} + } + for _, v := range typedV { + if newV, keep := removeDirectives(v); keep { + res = append(res, newV) + } + } + return res, true + } else { + return obj, true + } +} + // Merge fields from a patch map into the original map. Note: This may modify // both the original map and the patch because getting a deep copy of a map in // golang is highly non-trivial. @@ -1319,6 +1361,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me // original. Otherwise, check if we want to preserve it or skip it. // Preserving the null value is useful when we want to send an explicit // delete to the API server. + // In some cases, this may lead to inconsistent behavior with create. 
+ // ref: https://github.com/kubernetes/kubernetes/issues/123304 + // To avoid breaking compatibility, + // we made corresponding changes on the client side to ensure that the create and patch behaviors are idempotent. if patchV == nil { delete(original, k) if mergeOptions.IgnoreUnmatchedNulls { @@ -1333,7 +1379,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me if mergeOptions.IgnoreUnmatchedNulls { discardNullValuesFromPatch(patchV) } - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1345,7 +1394,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me if mergeOptions.IgnoreUnmatchedNulls { discardNullValuesFromPatch(patchV) } - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + delete(original, k) + } } continue } @@ -1372,7 +1424,11 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me } original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: - original[k] = patchV + original[k], ok = removeDirectives(patchV) + if !ok { + // if patchV itself is a directive, then don't keep it + delete(original, k) + } } if err != nil { return nil, err @@ -1425,7 +1481,8 @@ func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, return nil, err } - if fieldPatchStrategy == mergeDirective { + // Delete lists are handled the same way regardless of what the field's patch strategy is + if fieldPatchStrategy == mergeDirective || isDeleteList { return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) } else { return typedPatch, nil diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS new file mode 100644 index 00000000..40237324 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index ae73bda9..bc387d01 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -200,12 +200,12 @@ func Invalid(field *Path, value interface{}, detail string) *Error { // NotSupported returns a *Error indicating "unsupported value". // This is used to report unknown values for enumerated fields (e.g. a list of // valid values). 
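The NotSupported change below makes the helper generic over string-like types; a brief sketch under the assumption of a local Protocol enum (any ~string type behaves the same way).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

// Protocol stands in for a typed string enum such as corev1.Protocol.
type Protocol string

func main() {
	// validValues can now be a slice of any ~string type, so callers no longer
	// convert typed enum values to []string just to report them.
	err := field.NotSupported(field.NewPath("spec", "protocol"), Protocol("SCTP2"),
		[]Protocol{"TCP", "UDP", "SCTP"})

	// Prints something like:
	// spec.protocol: Unsupported value: "SCTP2": supported values: "TCP", "UDP", "SCTP"
	fmt.Println(err)
}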
-func NotSupported(field *Path, value interface{}, validValues []string) *Error { +func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *Error { detail := "" if len(validValues) > 0 { quotedValues := make([]string, len(validValues)) for i, v := range validValues { - quotedValues[i] = strconv.Quote(v) + quotedValues[i] = strconv.Quote(fmt.Sprint(v)) } detail = "supported values: " + strings.Join(quotedValues, ", ") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index e767092d..b3264490 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -19,10 +19,9 @@ package validation import ( "fmt" "math" - "net" "regexp" - "strconv" "strings" + "unicode" "k8s.io/apimachinery/pkg/util/validation/field" netutils "k8s.io/utils/net" @@ -191,7 +190,13 @@ func IsDNS1123Label(value string) []string { errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) } if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + if dns1123SubdomainRegexp.MatchString(value) { + // It was a valid subdomain and not a valid label. Since we + // already checked length, it must be dots. + errs = append(errs, "must not contain dots") + } else { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } } return errs } @@ -346,11 +351,12 @@ func IsValidPortName(port string) []string { } // IsValidIP tests that the argument is a valid IP address. -func IsValidIP(value string) []string { +func IsValidIP(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList if netutils.ParseIPSloppy(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"} + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)")) } - return nil + return allErrors } // IsValidIPv4Address tests that the argument is a valid IPv4 address. @@ -373,6 +379,16 @@ func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { return allErrors } +// IsValidCIDR tests that the argument is a valid CIDR value. +func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + _, _, err := netutils.ParseCIDRSloppy(value) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)")) + } + return allErrors +} + const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" @@ -403,6 +419,9 @@ func IsHTTPHeaderName(value string) []string { const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" +// TODO(hirazawaui): Rename this when the RelaxedEnvironmentVariableValidation gate is removed. +const relaxedEnvVarNameFmtErrMsg string = "a valid environment variable name must consist only of printable ASCII characters other than '='" + var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") // IsEnvVarName tests if a string is a valid environment variable name. 
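Two of the changes above alter calling conventions: IsValidIP now takes a *field.Path and returns a field.ErrorList instead of []string, and IsValidCIDR is new. A minimal sketch follows; the field paths and the deliberately invalid CIDR are illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	var errs field.ErrorList

	// A valid address contributes no errors.
	errs = append(errs, validation.IsValidIP(field.NewPath("spec", "clusterIP"), "10.9.8.7")...)

	// /33 is out of range for IPv4, so this appends one *field.Error.
	errs = append(errs, validation.IsValidCIDR(field.NewPath("spec", "podCIDR"), "10.9.8.0/33")...)

	for _, e := range errs {
		fmt.Println(e)
	}
}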
@@ -416,6 +435,24 @@ func IsEnvVarName(value string) []string { return errs } +// IsRelaxedEnvVarName tests if a string is a valid environment variable name. +func IsRelaxedEnvVarName(value string) []string { + var errs []string + + if len(value) == 0 { + errs = append(errs, "environment variable name "+EmptyError()) + } + + for _, r := range value { + if r > unicode.MaxASCII || !unicode.IsPrint(r) || r == '=' { + errs = append(errs, relaxedEnvVarNameFmtErrMsg) + break + } + } + + return errs +} + const configMapKeyFmt = `[-._a-zA-Z0-9]+` const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" @@ -487,18 +524,3 @@ func hasChDirPrefix(value string) []string { } return errs } - -// IsValidSocketAddr checks that string represents a valid socket address -// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) -func IsValidSocketAddr(value string) []string { - var errs []string - ip, port, err := net.SplitHostPort(value) - if err != nil { - errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") - return errs - } - portInt, _ := strconv.Atoi(port) - errs = append(errs, IsValidPortNum(portInt)...) - errs = append(errs, IsValidIP(ip)...) - return errs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go new file mode 100644 index 00000000..41876192 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go @@ -0,0 +1,502 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "math" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/utils/clock" +) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. + Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. + Cap time.Duration +} + +// Step returns an amount of time to sleep determined by the original +// Duration and Jitter. The backoff is mutated to update its Steps and +// Duration. 
A nil Backoff always has a zero-duration step. +func (b *Backoff) Step() time.Duration { + if b == nil { + return 0 + } + var nextDuration time.Duration + nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter) + return nextDuration +} + +// DelayFunc returns a function that will compute the next interval to +// wait given the arguments in b. It does not mutate the original backoff +// but the function is safe to use only from a single goroutine. +func (b Backoff) DelayFunc() DelayFunc { + steps := b.Steps + duration := b.Duration + cap := b.Cap + factor := b.Factor + jitter := b.Jitter + + return func() time.Duration { + var nextDuration time.Duration + // jitter is applied per step and is not cumulative over multiple steps + nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter) + return nextDuration + } +} + +// Timer returns a timer implementation appropriate to this backoff's parameters +// for use with wait functions. +func (b Backoff) Timer() Timer { + if b.Steps > 1 || b.Jitter != 0 { + return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()} + } + if b.Duration > 0 { + return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration} + } + return newNoopTimer() +} + +// delay implements the core delay algorithm used in this package. +func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) { + // when steps is non-positive, do not alter the base duration + if steps < 1 { + if jitter > 0 { + return Jitter(duration, jitter), duration, 0 + } + return duration, duration, 0 + } + steps-- + + // calculate the next step's interval + if factor != 0 { + next = time.Duration(float64(duration) * factor) + if cap > 0 && next > cap { + next = cap + steps = 0 + } + } else { + next = duration + } + + // add jitter for this step + if jitter > 0 { + duration = Jitter(duration, jitter) + } + + return duration, next, steps + +} + +// DelayWithReset returns a DelayFunc that will return the appropriate next interval to +// wait. Every resetInterval the backoff parameters are reset to their initial state. +// This method is safe to invoke from multiple goroutines, but all calls will advance +// the backoff state when Factor is set. If Factor is zero, this method is the same as +// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is +// zero no backoff will be performed as the same calling DelayFunc with a zero factor +// and steps. +func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc { + if b.Factor <= 0 { + return b.DelayFunc() + } + if resetInterval <= 0 { + b.Steps = 0 + b.Factor = 0 + return b.DelayFunc() + } + return (&backoffManager{ + backoff: b, + initialBackoff: b, + resetInterval: resetInterval, + + clock: c, + lastStart: c.Now(), + timer: nil, + }).Step +} + +// Until loops until stop channel is closed, running f every period. +// +// Until is syntactic sugar on top of JitterUntil with zero jitter factor and +// with sliding = true (which means the timer for period starts after the f +// completes). +func Until(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, true, stopCh) +} + +// UntilWithContext loops until context is done, running f every period. +// +// UntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor and with sliding = true (which means the timer +// for period starts after the f completes). 
+func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, true) +} + +// NonSlidingUntil loops until stop channel is closed, running f every +// period. +// +// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter +// factor, with sliding = false (meaning the timer for period starts at the same +// time as the function starts). +func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, false, stopCh) +} + +// NonSlidingUntilWithContext loops until context is done, running f every +// period. +// +// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext +// with zero jitter factor, with sliding = false (meaning the timer for period +// starts at the same time as the function starts). +func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { + JitterUntilWithContext(ctx, f, period, 0.0, false) +} + +// JitterUntil loops until stop channel is closed, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Close stopCh to stop. f may not be invoked if stop channel is already +// closed. Pass NeverStop to if you don't want it stop. +func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { + BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) +} + +// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { + var t clock.Timer + for { + select { + case <-stopCh: + return + default: + } + + if !sliding { + t = backoff.Backoff() + } + + func() { + defer runtime.HandleCrash() + f() + }() + + if sliding { + t = backoff.Backoff() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and stopCh, and t.C select falls through. + // In order to mitigate we re-check stopCh at the beginning + // of every loop to prevent extra executions of f(). + select { + case <-stopCh: + if !t.Stop() { + <-t.C() + } + return + case <-t.C(): + } + } +} + +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already expired. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) +} + +// backoffManager provides simple backoff behavior in a threadsafe manner to a caller. 
+type backoffManager struct { + backoff Backoff + initialBackoff Backoff + resetInterval time.Duration + + clock clock.Clock + + lock sync.Mutex + lastStart time.Time + timer clock.Timer +} + +// Step returns the expected next duration to wait. +func (b *backoffManager) Step() time.Duration { + b.lock.Lock() + defer b.lock.Unlock() + + switch { + case b.resetInterval == 0: + b.backoff = b.initialBackoff + case b.clock.Now().Sub(b.lastStart) > b.resetInterval: + b.backoff = b.initialBackoff + b.lastStart = b.clock.Now() + } + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer +// for exponential backoff. The returned timer must be drained before calling Backoff() the second +// time. +func (b *backoffManager) Backoff() clock.Timer { + b.lock.Lock() + defer b.lock.Unlock() + if b.timer == nil { + b.timer = b.clock.NewTimer(b.Step()) + } else { + b.timer.Reset(b.Step()) + } + return b.timer +} + +// Timer returns a new Timer instance that shares the clock and the reset behavior with all other +// timers. +func (b *backoffManager) Timer() Timer { + return DelayFunc(b.Step).Timer(b.clock) +} + +// BackoffManager manages backoff with a particular scheme based on its underlying implementation. +type BackoffManager interface { + // Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not + // safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff + // until Timer.C() drains. If the second Backoff() is called before the timer from the first + // Backoff() call finishes, the first timer will NOT be drained and result in undetermined + // behavior. + Backoff() clock.Timer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. +type exponentialBackoffManagerImpl struct { + backoff *Backoff + backoffTimer clock.Timer + lastBackoffStart time.Time + initialBackoff time.Duration + backoffResetDuration time.Duration + clock clock.Clock +} + +// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and +// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. +// This backoff manager is used to reduce load during upstream unhealthiness. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then +// invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// delayFn := wait.Backoff{ +// Duration: init, +// Cap: max, +// Steps: int(math.Ceil(float64(max) / float64(init))), // now a required argument +// Factor: factor, +// Jitter: jitter, +// }.DelayWithReset(reset, clock) +// wait.BackoffUntil(..., delayFn.Timer(), ...) 
+func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { + return &exponentialBackoffManagerImpl{ + backoff: &Backoff{ + Duration: initBackoff, + Factor: backoffFactor, + Jitter: jitter, + + // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not + // what we ideally need here, we set it to max int and assume we will never use up the steps + Steps: math.MaxInt32, + Cap: maxBackoff, + }, + backoffTimer: nil, + initialBackoff: initBackoff, + lastBackoffStart: c.Now(), + backoffResetDuration: resetDuration, + clock: c, + } +} + +func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { + if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { + b.backoff.Steps = math.MaxInt32 + b.backoff.Duration = b.initialBackoff + } + b.lastBackoffStart = b.clock.Now() + return b.backoff.Step() +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. +// The returned timer must be drained before calling Backoff() the second time +func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { + if b.backoffTimer == nil { + b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) + } else { + b.backoffTimer.Reset(b.getNextBackoff()) + } + return b.backoffTimer +} + +// Deprecated: Will be removed when the legacy polling functions are removed. +type jitteredBackoffManagerImpl struct { + clock clock.Clock + duration time.Duration + jitter float64 + backoffTimer clock.Timer +} + +// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter +// is negative, backoff will not be jittered. +// +// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a +// Backoff struct and invoke Timer() when calling wait.BackoffUntil. +// +// Instead of: +// +// bm := wait.NewJitteredBackoffManager(duration, jitter, clock) +// ... +// wait.BackoffUntil(..., bm.Backoff, ...) +// +// Use: +// +// wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...) +func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { + return &jitteredBackoffManagerImpl{ + clock: c, + duration: duration, + jitter: jitter, + backoffTimer: nil, + } +} + +func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { + jitteredPeriod := j.duration + if j.jitter > 0.0 { + jitteredPeriod = Jitter(j.duration, j.jitter) + } + return jitteredPeriod +} + +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. +// The returned timer must be drained before calling Backoff() the second time +func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } + return j.backoffTimer +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. 
+// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. +// +// Since backoffs are often subject to cancellation, we recommend using +// ExponentialBackoffWithContext and passing a context to the method. +func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + for backoff.Steps > 0 { + if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { + return err + } + if backoff.Steps == 1 { + break + } + time.Sleep(backoff.Step()) + } + return ErrWaitTimeout +} + +// ExponentialBackoffWithContext repeats a condition check with exponential backoff. +// It immediately returns an error if the condition returns an error, the context is cancelled +// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout). +// If an error is returned by the condition the backoff stops immediately. The condition will +// never be invoked more than backoff.Steps times. +func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error { + for backoff.Steps > 0 { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok { + return err + } + + if backoff.Steps == 1 { + break + } + + waitBeforeRetry := backoff.Step() + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(waitBeforeRetry): + } + } + + return ErrWaitTimeout +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go new file mode 100644 index 00000000..1d3dcaa7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// DelayFunc returns the next time interval to wait. +type DelayFunc func() time.Duration + +// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes. +// Use Backoff{...}.Timer() for simple delays and more efficient timers. +func (fn DelayFunc) Timer(c clock.Clock) Timer { + return &variableTimer{fn: fn, new: c.NewTimer} +} + +// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This +// offers all of the functionality of the methods in this package. +func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition) +} + +// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that +// wish to share a single delay timer. 
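A brief sketch (not part of the diff) of ExponentialBackoffWithContext as defined above, assuming a condition that succeeds on its third attempt; the interval values are arbitrary.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	err := wait.ExponentialBackoffWithContext(context.Background(),
		wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Steps: 5},
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts == 3, nil // report success on the third check
		})
	fmt.Println(attempts, err) // 3 <nil>
}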
+func (fn DelayFunc) Concurrent() DelayFunc { + var lock sync.Mutex + return func() time.Duration { + lock.Lock() + defer lock.Unlock() + return fn() + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/error.go b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go new file mode 100644 index 00000000..dd75801d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/error.go @@ -0,0 +1,96 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "errors" +) + +// ErrWaitTimeout is returned when the condition was not satisfied in time. +// +// Deprecated: This type will be made private in favor of Interrupted() +// for checking errors or ErrorInterrupted(err) for returning a wrapped error. +var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition")) + +// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or +// Until loop exited for any reason besides the condition returning true or an +// error. A loop is considered interrupted if the calling context is cancelled, +// the context reaches its deadline, or a backoff reaches its maximum allowed +// steps. +// +// Callers should use this method instead of comparing the error value directly to +// ErrWaitTimeout, as methods that cancel a context may not return that error. +// +// Instead of: +// +// err := wait.Poll(...) +// if err == wait.ErrWaitTimeout { +// log.Infof("Wait for operation exceeded") +// } else ... +// +// Use: +// +// err := wait.Poll(...) +// if wait.Interrupted(err) { +// log.Infof("Wait for operation exceeded") +// } else ... +func Interrupted(err error) bool { + switch { + case errors.Is(err, errWaitTimeout), + errors.Is(err, context.Canceled), + errors.Is(err, context.DeadlineExceeded): + return true + default: + return false + } +} + +// errInterrupted +type errInterrupted struct { + cause error +} + +// ErrorInterrupted returns an error that indicates the wait was ended +// early for a given reason. If no cause is provided a generic error +// will be used but callers are encouraged to provide a real cause for +// clarity in debugging. +func ErrorInterrupted(cause error) error { + switch cause.(type) { + case errInterrupted: + // no need to wrap twice since errInterrupted is only needed + // once in a chain + return cause + default: + return errInterrupted{cause} + } +} + +// errWaitTimeout is the private version of the previous ErrWaitTimeout +// and is private to prevent direct comparison. Use ErrorInterrupted(err) +// to get an error that will return true for Interrupted(err). 
+var errWaitTimeout = errInterrupted{} + +func (e errInterrupted) Unwrap() error { return e.cause } +func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout } +func (e errInterrupted) Error() string { + if e.cause == nil { + // returns the same error message as historical behavior + return "timed out waiting for the condition" + } + return e.cause.Error() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go new file mode 100644 index 00000000..107bfc13 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -0,0 +1,95 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// loopConditionUntilContext executes the provided condition at intervals defined by +// the provided timer until the provided context is cancelled, the condition returns +// true, or the condition returns an error. If sliding is true, the period is computed +// after condition runs. If it is false then period includes the runtime for condition. +// If immediate is false the first delay happens before any call to condition, if +// immediate is true the condition will be invoked before waiting and guarantees that +// the condition is invoked at least once, regardless of whether the context has been +// cancelled. The returned error is the error returned by the last condition or the +// context error if the context was terminated. +// +// This is the common loop construct for all polling in the wait package. +func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error { + defer t.Stop() + + var timeCh <-chan time.Time + doneCh := ctx.Done() + + if !sliding { + timeCh = t.C() + } + + // if immediate is true the condition is + // guaranteed to be executed at least once, + // if we haven't requested immediate execution, delay once + if immediate { + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + } + + if sliding { + timeCh = t.C() + } + + for { + + // Wait for either the context to be cancelled or the next invocation be called + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } + + // IMPORTANT: Because there is no channel priority selection in golang + // it is possible for very short timers to "win" the race in the previous select + // repeatedly even when the context has been canceled. We therefore must + // explicitly check for context cancellation on every loop and exit if true to + // guarantee that we don't invoke condition more than once after context has + // been cancelled. 
+ if err := ctx.Err(); err != nil { + return err + } + + if !sliding { + t.Next() + } + if ok, err := func() (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) + }(); err != nil || ok { + return err + } + if sliding { + t.Next() + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go new file mode 100644 index 00000000..231d4c38 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go @@ -0,0 +1,315 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "context" + "time" +) + +// PollUntilContextCancel tries a condition func until it returns true, an error, or the context +// is cancelled or hits a deadline. condition will be invoked after the first interval if the +// context is not cancelled first. The returned error will be from ctx.Err(), the condition's +// err return value, or nil. If invoking condition takes longer than interval the next condition +// will be invoked immediately. When using very short intervals, condition may be invoked multiple +// times before a context cancellation is detected. If immediate is true, condition will be +// invoked before waiting and guarantees that condition is invoked at least once, regardless of +// whether the context has been cancelled. +func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error { + return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// PollUntilContextTimeout will terminate polling after timeout duration by setting a context +// timeout. This is provided as a convenience function for callers not currently executing under +// a deadline and is equivalent to: +// +// deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) +// err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition) +// +// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying +// inline usage. All other behavior is identical to PollUntilContextCancel. +func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error { + deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout) + defer deadlineCancel() + return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition) +} + +// Poll tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. 
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func Poll(interval, timeout time.Duration, condition ConditionFunc) error { + return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollWithContext tries a condition func until it returns true, an error, +// or when the context expires or the timeout is reached, whichever +// happens first. +// +// PollWithContext always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, timeout), condition) +} + +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PollUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollUntilWithContext always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollInfinite tries a condition func until it returns true or an error +// +// PollInfinite always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfinite(interval time.Duration, condition ConditionFunc) error { + return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollInfiniteWithContext tries a condition func until it returns true or an error +// +// PollInfiniteWithContext always waits the interval before the run of 'condition'. 
+// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, false, poller(interval, 0), condition) +} + +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// PollImmediate always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) +} + +// PollImmediateWithContext tries a condition func until it returns true, an error, +// or the timeout is reached or the specified context expires, whichever happens first. +// +// PollImmediateWithContext always checks 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to immediately Poll something forever, see PollImmediateInfinite. +// +// Deprecated: This method does not return errors from context, use PollUntilContextTimeout. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, timeout), condition) +} + +// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. +// +// PollImmediateUntil runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext()) +} + +// PollImmediateUntilWithContext tries a condition func until it returns true, +// an error or the specified context is cancelled or expired. +// +// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. +// 'condition' will always be invoked at least once. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. 
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +} + +// PollImmediateInfiniteWithContext tries a condition func until it returns true +// or an error or the specified context gets cancelled or expired. +// +// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// Deprecated: This method does not return errors from context, use PollUntilContextCancel. +// Note that the new method will no longer return ErrWaitTimeout and instead return errors +// defined by the context package. Will be removed in a future release. +func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { + return poll(ctx, true, poller(interval, 0), condition) +} + +// Internally used, each of the public 'Poll*' function defined in this +// package should invoke this internal function with appropriate parameters. +// ctx: the context specified by the caller, for infinite polling pass +// a context that never gets cancelled or expired. +// immediate: if true, the 'condition' will be invoked before waiting for the interval, +// in this case 'condition' will always be invoked at least once. +// wait: user specified WaitFunc function that controls at what interval the condition +// function should be invoked periodically and whether it is bound by a timeout. +// condition: user specified ConditionWithContextFunc function. +// +// Deprecated: will be removed in favor of loopConditionUntilContext. +func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error { + if immediate { + done, err := runConditionWithCrashProtectionWithContext(ctx, condition) + if err != nil { + return err + } + if done { + return nil + } + } + + select { + case <-ctx.Done(): + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead + return ErrWaitTimeout + default: + return waitForWithContext(ctx, wait, condition) + } +} + +// poller returns a WaitFunc that will send to the channel every interval until +// timeout has elapsed and then closes the channel. +// +// Over very short intervals you may receive no ticks before the channel is +// closed. A timeout of 0 is interpreted as an infinity, and in such a case +// it would be the caller's responsibility to close the done channel. +// Failure to do so would result in a leaked goroutine. 
+// +// Output ticks are not buffered. If the channel is not ready to receive an +// item, the tick is skipped. +// +// Deprecated: Will be removed in a future release. +func poller(interval, timeout time.Duration) waitWithContextFunc { + return waitWithContextFunc(func(ctx context.Context) <-chan struct{} { + ch := make(chan struct{}) + + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + // time.After is more convenient, but it + // potentially leaves timers around much longer + // than necessary if we exit early. + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + + for { + select { + case <-tick.C: + // If the consumer isn't ready for this signal drop it and + // check the other channels. + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-ctx.Done(): + return + } + } + }() + + return ch + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go new file mode 100644 index 00000000..3efba321 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go @@ -0,0 +1,121 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "time" + + "k8s.io/utils/clock" +) + +// Timer abstracts how wait functions interact with time runtime efficiently. Test +// code may implement this interface directly but package consumers are encouraged +// to use the Backoff type as the primary mechanism for acquiring a Timer. The +// interface is a simplification of clock.Timer to prevent misuse. Timers are not +// expected to be safe for calls from multiple goroutines. +type Timer interface { + // C returns a channel that will receive a struct{} each time the timer fires. + // The channel should not be waited on after Stop() is invoked. It is allowed + // to cache the returned value of C() for the lifetime of the Timer. + C() <-chan time.Time + // Next is invoked by wait functions to signal timers that the next interval + // should begin. You may only use Next() if you have drained the channel C(). + // You should not call Next() after Stop() is invoked. + Next() + // Stop releases the timer. It is safe to invoke if no other methods have been + // called. + Stop() +} + +type noopTimer struct { + closedCh <-chan time.Time +} + +// newNoopTimer creates a timer with a unique channel to avoid contention +// for the channel's lock across multiple unrelated timers. 
+func newNoopTimer() noopTimer { + ch := make(chan time.Time) + close(ch) + return noopTimer{closedCh: ch} +} + +func (t noopTimer) C() <-chan time.Time { + return t.closedCh +} +func (noopTimer) Next() {} +func (noopTimer) Stop() {} + +type variableTimer struct { + fn DelayFunc + t clock.Timer + new func(time.Duration) clock.Timer +} + +func (t *variableTimer) C() <-chan time.Time { + if t.t == nil { + d := t.fn() + t.t = t.new(d) + } + return t.t.C() +} +func (t *variableTimer) Next() { + if t.t == nil { + return + } + d := t.fn() + t.t.Reset(d) +} +func (t *variableTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +type fixedTimer struct { + interval time.Duration + t clock.Ticker + new func(time.Duration) clock.Ticker +} + +func (t *fixedTimer) C() <-chan time.Time { + if t.t == nil { + t.t = t.new(t.interval) + } + return t.t.C() +} +func (t *fixedTimer) Next() { + // no-op for fixed timers +} +func (t *fixedTimer) Stop() { + if t.t == nil { + return + } + t.t.Stop() + t.t = nil +} + +var ( + // RealTimer can be passed to methods that need a clock.Timer. + RealTimer = clock.RealClock{}.NewTimer +) + +var ( + // internalClock is used for test injection of clocks + internalClock = clock.RealClock{} +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 137627b4..6805e8cf 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -18,14 +18,11 @@ package wait import ( "context" - "errors" - "math" "math/rand" "sync" "time" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/utils/clock" ) // For any test of the style: @@ -83,113 +80,6 @@ func Forever(f func(), period time.Duration) { Until(f, period, NeverStop) } -// Until loops until stop channel is closed, running f every period. -// -// Until is syntactic sugar on top of JitterUntil with zero jitter factor and -// with sliding = true (which means the timer for period starts after the f -// completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// UntilWithContext loops until context is done, running f every period. -// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). -func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. -// -// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = false (meaning the timer for period starts at the same -// time as the function starts). -func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// NonSlidingUntilWithContext loops until context is done, running f every -// period. -// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - -// JitterUntil loops until stop channel is closed, running f every period. 
-// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Close stopCh to stop. f may not be invoked if stop channel is already -// closed. Pass NeverStop to if you don't want it stop. -func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) -} - -// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { - var t clock.Timer - for { - select { - case <-stopCh: - return - default: - } - - if !sliding { - t = backoff.Backoff() - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = backoff.Backoff() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and stopCh, and t.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - if !t.Stop() { - <-t.C() - } - return - case <-t.C(): - } - } -} - -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -203,9 +93,6 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration { return wait } -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - // ConditionFunc returns true if the condition is satisfied, or an error // if the loop should be aborted. type ConditionFunc func() (done bool, err error) @@ -223,425 +110,80 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc { } } -// runConditionWithCrashProtection runs a ConditionFunc with crash protection -func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { - return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext()) -} - -// runConditionWithCrashProtectionWithContext runs a -// ConditionWithContextFunc with crash protection. -func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { - defer runtime.HandleCrash() - return condition(ctx) -} - -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. 
- Duration time.Duration - // Duration is multiplied by factor each iteration, if factor is not zero - // and the limits imposed by Steps and Cap have not been reached. - // Should not be negative. - // The jitter does not contribute to the updates to the duration parameter. - Factor float64 - // The sleep at each iteration is the duration plus an additional - // amount chosen uniformly at random from the interval between - // zero and `jitter*duration`. - Jitter float64 - // The remaining number of iterations in which the duration - // parameter may change (but progress can be stopped earlier by - // hitting the cap). If not positive, the duration is not - // changed. Used for exponential backoff in combination with - // Factor and Cap. - Steps int - // A limit on revised values of the duration parameter. If a - // multiplication by the factor parameter would make the duration - // exceed the cap then the duration is set to the cap and the - // steps parameter is set to zero. - Cap time.Duration -} - -// Step (1) returns an amount of time to sleep determined by the -// original Duration and Jitter and (2) mutates the provided Backoff -// to update its Steps and Duration. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// ContextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func ContextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - -// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in -// undetermined behavior. -// The BackoffManager is supposed to be called in a single-threaded environment. -type BackoffManager interface { - Backoff() clock.Timer -} - -type exponentialBackoffManagerImpl struct { - backoff *Backoff - backoffTimer clock.Timer - lastBackoffStart time.Time - initialBackoff time.Duration - backoffResetDuration time.Duration - clock clock.Clock -} - -// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and -// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. -// This backoff manager is used to reduce load during upstream unhealthiness. 
-func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { - return &exponentialBackoffManagerImpl{ - backoff: &Backoff{ - Duration: initBackoff, - Factor: backoffFactor, - Jitter: jitter, - - // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not - // what we ideally need here, we set it to max int and assume we will never use up the steps - Steps: math.MaxInt32, - Cap: maxBackoff, - }, - backoffTimer: nil, - initialBackoff: initBackoff, - lastBackoffStart: c.Now(), - backoffResetDuration: resetDuration, - clock: c, - } -} - -func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { - if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { - b.backoff.Steps = math.MaxInt32 - b.backoff.Duration = b.initialBackoff - } - b.lastBackoffStart = b.clock.Now() - return b.backoff.Step() -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. -// The returned timer must be drained before calling Backoff() the second time -func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - if b.backoffTimer == nil { - b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) - } else { - b.backoffTimer.Reset(b.getNextBackoff()) - } - return b.backoffTimer -} - -type jitteredBackoffManagerImpl struct { - clock clock.Clock - duration time.Duration - jitter float64 - backoffTimer clock.Timer -} - -// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter -// is negative, backoff will not be jittered. -func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { - return &jitteredBackoffManagerImpl{ - clock: c, - duration: duration, - jitter: jitter, - backoffTimer: nil, - } -} - -func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { - jitteredPeriod := j.duration - if j.jitter > 0.0 { - jitteredPeriod = Jitter(j.duration, j.jitter) - } - return jitteredPeriod +// ContextForChannel provides a context that will be treated as cancelled +// when the provided parentCh is closed. The implementation returns +// context.Canceled for Err() if and only if the parentCh is closed. +func ContextForChannel(parentCh <-chan struct{}) context.Context { + return channelContext{stopCh: parentCh} } -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. -// The returned timer must be drained before calling Backoff() the second time -func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - backoff := j.getNextBackoff() - if j.backoffTimer == nil { - j.backoffTimer = j.clock.NewTimer(backoff) - } else { - j.backoffTimer.Reset(backoff) - } - return j.backoffTimer -} +var _ context.Context = channelContext{} -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It repeatedly checks the condition and then sleeps, using `backoff.Step()` -// to determine the length of the sleep and adjust Duration and Steps. -// Stops and returns as soon as: -// 1. the condition check returns true or an error, -// 2. `backoff.Steps` checks of the condition have been done, or -// 3. a sleep truncated by the cap on duration has been completed. -// In case (1) the returned error is what the condition function returned. -// In all other cases, ErrWaitTimeout is returned. 
-func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// Poll always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return PollWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollWithContext tries a condition func until it returns true, an error, -// or when the context expires or the timeout is reached, whichever -// happens first. -// -// PollWithContext always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, timeout), condition) -} - -// PollUntil tries a condition func until it returns true, an error or stopCh is +// channelContext will behave as if the context were cancelled when stopCh is // closed. -// -// PollUntil always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollUntilWithContext always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) -} - -// PollInfinite tries a condition func until it returns true or an error -// -// PollInfinite always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - return PollInfiniteWithContext(context.Background(), interval, condition.WithContext()) -} - -// PollInfiniteWithContext tries a condition func until it returns true or an error -// -// PollInfiniteWithContext always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. 
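Migration note (not part of this diff): the Poll* helpers deleted above survive elsewhere in the updated package but are deprecated in favor of context-first equivalents such as wait.PollUntilContextTimeout and wait.PollUntilContextCancel. A hedged sketch of the timeout form:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// The trailing bool plays the role of the old Poll vs. PollImmediate split:
	// false waits one interval before the first check (old Poll), true checks
	// immediately (old PollImmediate).
	err := wait.PollUntilContextTimeout(context.Background(), 50*time.Millisecond, 5*time.Second, false,
		func(ctx context.Context) (bool, error) {
			attempts++
			return attempts >= 3, nil
		})
	fmt.Println(attempts, err) // 3 <nil>
}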
-func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, false, poller(interval, 0), condition) +type channelContext struct { + stopCh <-chan struct{} } -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext()) -} - -// PollImmediateWithContext tries a condition func until it returns true, an error, -// or the timeout is reached or the specified context expires, whichever happens first. -// -// PollImmediateWithContext always checks 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. -func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, timeout), condition) -} - -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := ContextForChannel(stopCh) - defer cancel() - return PollImmediateUntilWithContext(ctx, interval, condition.WithContext()) -} - -// PollImmediateUntilWithContext tries a condition func until it returns true, -// an error or the specified context is cancelled or expired. -// -// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval. -// 'condition' will always be invoked at least once. -func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) +func (c channelContext) Done() <-chan struct{} { return c.stopCh } +func (c channelContext) Err() error { + select { + case <-c.stopCh: + return context.Canceled + default: + return nil + } } +func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false } +func (c channelContext) Value(key any) any { return nil } -// PollImmediateInfinite tries a condition func until it returns true or an error +// runConditionWithCrashProtection runs a ConditionFunc with crash protection. // -// PollImmediateInfinite runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext()) +// Deprecated: Will be removed when the legacy polling methods are removed. 
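The caller-visible change here is the ContextForChannel signature: it no longer starts a goroutine or returns a CancelFunc, because the returned channelContext simply exposes the stop channel as Done(). Callers that previously wrote ctx, cancel := wait.ContextForChannel(ch) and deferred cancel now take a single return value. A small sketch of the new shape:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// New form: one return value, nothing to defer or cancel.
	ctx := wait.ContextForChannel(stopCh)
	fmt.Println(ctx.Err()) // <nil>

	close(stopCh)
	<-ctx.Done()           // Done() is stopCh itself, so this returns immediately
	fmt.Println(ctx.Err()) // context canceled
}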
+func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + defer runtime.HandleCrash() + return condition() } -// PollImmediateInfiniteWithContext tries a condition func until it returns true -// or an error or the specified context gets cancelled or expired. +// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc +// with crash protection. // -// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error { - return poll(ctx, true, poller(interval, 0), condition) -} - -// Internally used, each of the public 'Poll*' function defined in this -// package should invoke this internal function with appropriate parameters. -// ctx: the context specified by the caller, for infinite polling pass -// a context that never gets cancelled or expired. -// immediate: if true, the 'condition' will be invoked before waiting for the interval, -// in this case 'condition' will always be invoked at least once. -// wait: user specified WaitFunc function that controls at what interval the condition -// function should be invoked periodically and whether it is bound by a timeout. -// condition: user specified ConditionWithContextFunc function. -func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error { - if immediate { - done, err := runConditionWithCrashProtectionWithContext(ctx, condition) - if err != nil { - return err - } - if done { - return nil - } - } - - select { - case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility - return ErrWaitTimeout - default: - return WaitForWithContext(ctx, wait, condition) - } +// Deprecated: Will be removed when the legacy polling methods are removed. +func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { + defer runtime.HandleCrash() + return condition(ctx) } -// WaitFunc creates a channel that receives an item every time a test +// waitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. -type WaitFunc func(done <-chan struct{}) <-chan struct{} +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitFunc func(done <-chan struct{}) <-chan struct{} // WithContext converts the WaitFunc to an equivalent WaitWithContextFunc -func (w WaitFunc) WithContext() WaitWithContextFunc { +func (w waitFunc) WithContext() waitWithContextFunc { return func(ctx context.Context) <-chan struct{} { return w(ctx.Done()) } } -// WaitWithContextFunc creates a channel that receives an item every time a test +// waitWithContextFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. // // When the specified context gets cancelled or expires the function // stops sending item and returns immediately. -type WaitWithContextFunc func(ctx context.Context) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. // -// WaitFor gets a channel from 'wait()”, and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. 
If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. -// -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. -// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - ctx, cancel := ContextForChannel(done) - defer cancel() - return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext()) -} +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +type waitWithContextFunc func(ctx context.Context) <-chan struct{} -// WaitForWithContext continually checks 'fn' as driven by 'wait'. +// waitForWithContext continually checks 'fn' as driven by 'wait'. // -// WaitForWithContext gets a channel from 'wait()”, and then invokes 'fn' +// waitForWithContext gets a channel from 'wait()”, and then invokes 'fn' // once for every value placed on the channel and once more when the // channel is closed. If the channel is closed and 'fn' -// returns false without error, WaitForWithContext returns ErrWaitTimeout. +// returns false without error, waitForWithContext returns ErrWaitTimeout. // // If 'fn' returns an error the loop ends and that error is returned. If // 'fn' returns true the loop ends and nil is returned. @@ -651,8 +193,11 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { // // When the ctx.Done() channel is closed, because the golang `select` statement is // "uniform pseudo-random", the `fn` might still run one or multiple times, -// though eventually `WaitForWithContext` will return. -func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error { +// though eventually `waitForWithContext` will return. +// +// Deprecated: Will be removed in a future release in favor of +// loopConditionUntilContext. +func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error { waitCtx, cancel := context.WithCancel(context.Background()) defer cancel() c := wait(waitCtx) @@ -670,88 +215,9 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit return ErrWaitTimeout } case <-ctx.Done(): - // returning ctx.Err() will break backward compatibility + // returning ctx.Err() will break backward compatibility, use new PollUntilContext* + // methods instead return ErrWaitTimeout } } } - -// poller returns a WaitFunc that will send to the channel every interval until -// timeout has elapsed and then closes the channel. -// -// Over very short intervals you may receive no ticks before the channel is -// closed. A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. -// -// Output ticks are not buffered. If the channel is not ready to receive an -// item, the tick is skipped. 
-func poller(interval, timeout time.Duration) WaitWithContextFunc { - return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-ctx.Done(): - return - } - } - }() - - return ch - }) -} - -// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never -// exceeds the deadline specified by the request context. -func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - - if backoff.Steps == 1 { - break - } - - waitBeforeRetry := backoff.Step() - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(waitBeforeRetry): - } - } - - return ErrWaitTimeout -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/ratelimited_waitgroup.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/ratelimited_waitgroup.go new file mode 100644 index 00000000..8766390f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/ratelimited_waitgroup.go @@ -0,0 +1,134 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package waitgroup + +import ( + "context" + "fmt" + "sync" +) + +// RateLimiter abstracts the rate limiter used by RateLimitedSafeWaitGroup. +// The implementation must be thread-safe. +type RateLimiter interface { + Wait(ctx context.Context) error +} + +// RateLimiterFactoryFunc is used by the RateLimitedSafeWaitGroup to create a new +// instance of a RateLimiter that will be used to rate limit the return rate +// of the active number of request(s). 'count' is the number of requests in +// flight that are expected to invoke 'Done' on this wait group. +type RateLimiterFactoryFunc func(count int) (RateLimiter, context.Context, context.CancelFunc) + +// RateLimitedSafeWaitGroup must not be copied after first use. +type RateLimitedSafeWaitGroup struct { + wg sync.WaitGroup + // Once Wait is initiated, all consecutive Done invocation will be + // rate limited using this rate limiter. + limiter RateLimiter + stopCtx context.Context + + mu sync.Mutex + // wait indicate whether Wait is called, if true, + // then any Add with positive delta will return error. 
+ wait bool + // number of request(s) currently using the wait group + count int +} + +// Add adds delta, which may be negative, similar to sync.WaitGroup. +// If Add with a positive delta happens after Wait, it will return error, +// which prevent unsafe Add. +func (wg *RateLimitedSafeWaitGroup) Add(delta int) error { + wg.mu.Lock() + defer wg.mu.Unlock() + + if wg.wait && delta > 0 { + return fmt.Errorf("add with positive delta after Wait is forbidden") + } + wg.wg.Add(delta) + wg.count += delta + return nil +} + +// Done decrements the WaitGroup counter, rate limiting is applied only +// when the wait group is in waiting mode. +func (wg *RateLimitedSafeWaitGroup) Done() { + var limiter RateLimiter + func() { + wg.mu.Lock() + defer wg.mu.Unlock() + + wg.count -= 1 + if wg.wait { + // we are using the limiter outside the scope of the lock + limiter = wg.limiter + } + }() + + defer wg.wg.Done() + if limiter != nil { + limiter.Wait(wg.stopCtx) + } +} + +// Wait blocks until the WaitGroup counter is zero or a hard limit has elapsed. +// It returns the number of active request(s) accounted for at the time Wait +// has been invoked, number of request(s) that have drianed (done using the +// wait group immediately before Wait returns). +// Ideally, the both numbers returned should be equal, to indicate that all +// request(s) using the wait group have released their lock. +func (wg *RateLimitedSafeWaitGroup) Wait(limiterFactory RateLimiterFactoryFunc) (int, int, error) { + if limiterFactory == nil { + return 0, 0, fmt.Errorf("rate limiter factory must be specified") + } + + var cancel context.CancelFunc + var countNow, countAfter int + func() { + wg.mu.Lock() + defer wg.mu.Unlock() + + wg.limiter, wg.stopCtx, cancel = limiterFactory(wg.count) + countNow = wg.count + wg.wait = true + }() + + defer cancel() + // there should be a hard stop, in case request(s) are not responsive + // enough to invoke Done before the grace period is over. + waitDoneCh := make(chan struct{}) + go func() { + defer close(waitDoneCh) + wg.wg.Wait() + }() + + var err error + select { + case <-wg.stopCtx.Done(): + err = wg.stopCtx.Err() + case <-waitDoneCh: + } + + func() { + wg.mu.Lock() + defer wg.mu.Unlock() + + countAfter = wg.count + }() + return countNow, countAfter, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index b6c7bbfa..ce37fd8c 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -27,13 +27,25 @@ import ( // Interface can be implemented by anything that knows how to watch and report changes. type Interface interface { - // Stop stops watching. Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. + // Stop tells the producer that the consumer is done watching, so the + // producer should stop sending events and close the result channel. The + // consumer should keep watching for events until the result channel is + // closed. + // + // Because some implementations may create channels when constructed, Stop + // must always be called, even if the consumer has not yet called + // ResultChan(). + // + // Only the consumer should call Stop(), not the producer. If the producer + // errors and needs to stop the watch prematurely, it should instead send + // an error event and close the result channel. Stop() - // ResultChan returns a chan which will receive all the events. 
If an error occurs - // or Stop() is called, the implementation will close this channel and - // release any resources used by the watch. + // ResultChan returns a channel which will receive events from the event + // producer. If an error occurs or Stop() is called, the producer must + // close this channel and release any resources used by the watch. + // Closing the result channel tells the consumer that no more events will be + // sent. ResultChan() <-chan Event } @@ -322,3 +334,21 @@ func (pw *ProxyWatcher) ResultChan() <-chan Event { func (pw *ProxyWatcher) StopChan() <-chan struct{} { return pw.stopCh } + +// MockWatcher implements watch.Interface with mockable functions. +type MockWatcher struct { + StopFunc func() + ResultChanFunc func() <-chan Event +} + +var _ Interface = &MockWatcher{} + +// Stop calls StopFunc +func (mw MockWatcher) Stop() { + mw.StopFunc() +} + +// ResultChan calls ResultChanFunc +func (mw MockWatcher) ResultChan() <-chan Event { + return mw.ResultChanFunc() +} diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS index c5950219..d16e47e8 100644 --- a/vendor/k8s.io/code-generator/OWNERS +++ b/vendor/k8s.io/code-generator/OWNERS @@ -1,13 +1,16 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: - - lavalamp + - deads2k + - jpbetz - wojtek-t - sttts reviewers: - - lavalamp + - deads2k - wojtek-t - sttts labels: - sig/api-machinery - area/code-generation +emeritus_approvers: + - lavalamp diff --git a/vendor/k8s.io/code-generator/README.md b/vendor/k8s.io/code-generator/README.md index 122868a5..b0055265 100644 --- a/vendor/k8s.io/code-generator/README.md +++ b/vendor/k8s.io/code-generator/README.md @@ -14,6 +14,10 @@ These code-generators can be used - The example [sample controller](https://github.com/kubernetes/sample-controller) shows a code example of a controller that uses the clients, listers and informers generated by this library. - The article [Kubernetes Deep Dive: Code Generation for CustomResources](https://cloud.redhat.com/blog/kubernetes-deep-dive-code-generation-customresources/) gives a step by step instruction on how to use this library. +## Usage + +The examples above are dated. The current recommended script to use is [kube_codegen.sh](kube_codegen.sh). + ## Compatibility HEAD of this repo will match HEAD of k8s.io/apiserver, k8s.io/apimachinery, and k8s.io/client-go. diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go new file mode 100644 index 00000000..36aa7a37 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/args.go @@ -0,0 +1,88 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "fmt" + + "github.com/spf13/pflag" + "k8s.io/gengo/v2/types" +) + +// Args is a wrapper for arguments to applyconfiguration-gen. 
+type Args struct { + OutputDir string // must be a directory path + OutputPkg string // must be a Go import-path + + GoHeaderFile string + + // ExternalApplyConfigurations provides the locations of externally generated + // apply configuration types for types referenced by the go structs provided as input. + // Locations are provided as a comma separated list of .: + // entries. + // + // E.g. if a type references appsv1.Deployment, the location of its apply configuration should + // be provided: + // k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1 + // + // meta/v1 types (TypeMeta and ObjectMeta) are always included and do not need to be passed in. + ExternalApplyConfigurations map[types.Name]string + + OpenAPISchemaFilePath string +} + +// New returns default arguments for the generator. +func New() *Args { + return &Args{ + ExternalApplyConfigurations: map[types.Name]string{ + // Always include the applyconfigurations we've generated in client-go. They are sufficient for the vast majority of use cases. + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "Condition"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "LabelSelector"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "LabelSelectorRequirement"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ManagedFieldsEntry"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ObjectMeta"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "OwnerReference"}: "k8s.io/client-go/applyconfigurations/meta/v1", + {Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "TypeMeta"}: "k8s.io/client-go/applyconfigurations/meta/v1", + }, + } +} + +func (args *Args) AddFlags(fs *pflag.FlagSet, inputBase string) { + fs.StringVar(&args.OutputDir, "output-dir", "", + "the base directory under which to generate results") + fs.StringVar(&args.OutputPkg, "output-pkg", args.OutputPkg, + "the Go import-path of the generated results") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") + fs.Var(NewExternalApplyConfigurationValue(&args.ExternalApplyConfigurations, nil), "external-applyconfigurations", + "list of comma separated external apply configurations locations in .: form."+ + "For example: k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1") + fs.StringVar(&args.OpenAPISchemaFilePath, "openapi-schema", "", + "path to the openapi schema containing all the types that apply configurations will be generated for") +} + +// Validate checks the given arguments. 
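For reviewers unfamiliar with the new generator: each --external-applyconfigurations entry takes the form <package>.<typeName>:<import path of the existing apply configuration package>, e.g. k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1. A hedged sketch of wiring Args to a flag set using only the API shown in this file (the example.com output package is a placeholder):

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"k8s.io/code-generator/cmd/applyconfiguration-gen/args"
)

func main() {
	a := args.New()
	fs := pflag.NewFlagSet("applyconfiguration-gen", pflag.ContinueOnError)
	a.AddFlags(fs, "")

	// Each external mapping points a referenced input type at the package that
	// already holds its generated apply configuration.
	_ = fs.Parse([]string{
		"--output-dir", "./applyconfigurations",
		"--output-pkg", "example.com/project/applyconfigurations",
		"--external-applyconfigurations",
		"k8s.io/api/apps/v1.Deployment:k8s.io/client-go/applyconfigurations/apps/v1",
	})
	fmt.Println(a.Validate(), len(a.ExternalApplyConfigurations))
}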
+func (args *Args) Validate() error { + if len(args.OutputDir) == 0 { + return fmt.Errorf("--output-dir must be specified") + } + if len(args.OutputPkg) == 0 { + return fmt.Errorf("--output-pkg must be specified") + } + return nil +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go new file mode 100644 index 00000000..fd9b6098 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/args/externaltypes.go @@ -0,0 +1,122 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package args + +import ( + "bytes" + "encoding/csv" + "flag" + "fmt" + "strings" + + "k8s.io/gengo/v2/types" +) + +type externalApplyConfigurationValue struct { + externals *map[types.Name]string +} + +func NewExternalApplyConfigurationValue(externals *map[types.Name]string, def []string) *externalApplyConfigurationValue { + val := new(externalApplyConfigurationValue) + val.externals = externals + if def != nil { + if err := val.set(def); err != nil { + panic(err) + } + } + return val +} + +var _ flag.Value = &externalApplyConfigurationValue{} + +func (s *externalApplyConfigurationValue) set(vs []string) error { + for _, input := range vs { + typ, pkg, err := parseExternalMapping(input) + if err != nil { + return err + } + if _, ok := (*s.externals)[typ]; ok { + return fmt.Errorf("duplicate type found in --external-applyconfigurations: %v", typ) + } + (*s.externals)[typ] = pkg + } + + return nil +} + +func (s *externalApplyConfigurationValue) Set(val string) error { + vs, err := readAsCSV(val) + if err != nil { + return err + } + if err := s.set(vs); err != nil { + return err + } + + return nil +} + +func (s *externalApplyConfigurationValue) Type() string { + return "string" +} + +func (s *externalApplyConfigurationValue) String() string { + var strs []string + for k, v := range *s.externals { + strs = append(strs, fmt.Sprintf("%s.%s:%s", k.Package, k.Name, v)) + } + str, _ := writeAsCSV(strs) + return "[" + str + "]" +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), "\n"), nil +} + +func parseExternalMapping(mapping string) (typ types.Name, pkg string, err error) { + parts := strings.Split(mapping, ":") + if len(parts) != 2 { + return types.Name{}, "", fmt.Errorf("expected string of the form .: but got %s", mapping) + } + packageTypeStr := parts[0] + pkg = parts[1] + // need to split on the *last* dot, since k8s.io (and other valid packages) have a dot in it + lastDot := strings.LastIndex(packageTypeStr, ".") + if lastDot == -1 || lastDot == len(packageTypeStr)-1 { + return types.Name{}, "", 
fmt.Errorf("expected package and type of the form . but got %s", packageTypeStr) + } + structPkg := packageTypeStr[:lastDot] + structType := packageTypeStr[lastDot+1:] + + return types.Name{Package: structPkg, Name: structType}, pkg, nil +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go new file mode 100644 index 00000000..89f7ed4f --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/applyconfiguration.go @@ -0,0 +1,472 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path" + "strings" + + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" + "k8s.io/klog/v2" + + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" +) + +// applyConfigurationGenerator produces apply configurations for a given GroupVersion and type. +type applyConfigurationGenerator struct { + generator.GoGenerator + // outPkgBase is the base package, under which the "internal" and GV-specific subdirs live + outPkgBase string // must be a Go import-path + localPkg string + groupVersion clientgentypes.GroupVersion + applyConfig applyConfig + imports namer.ImportTracker + refGraph refGraph + openAPIType *string // if absent, extraction function cannot be generated +} + +var _ generator.Generator = &applyConfigurationGenerator{} + +func (g *applyConfigurationGenerator) Filter(_ *generator.Context, t *types.Type) bool { + return t == g.applyConfig.Type +} + +func (g *applyConfigurationGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.localPkg, g.imports), + "singularKind": namer.NewPublicNamer(0), + } +} + +func (g *applyConfigurationGenerator) Imports(*generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +// TypeParams provides a struct that an apply configuration +// is generated for as well as the apply configuration details +// and types referenced by the struct. 
+type TypeParams struct { + Struct *types.Type + ApplyConfig applyConfig + Tags util.Tags + APIVersion string + ExtractInto *types.Type + ParserFunc *types.Type + OpenAPIType *string +} + +type memberParams struct { + TypeParams + Member types.Member + MemberType *types.Type + JSONTags JSONTags + ArgType *types.Type // only set for maps and slices + EmbeddedIn *memberParams // parent embedded member, if any +} + +func (g *applyConfigurationGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "$", "$") + + klog.V(5).Infof("processing type %v", t) + typeParams := TypeParams{ + Struct: t, + ApplyConfig: g.applyConfig, + Tags: genclientTags(t), + APIVersion: g.groupVersion.ToAPIVersion(), + ExtractInto: extractInto, + ParserFunc: types.Ref(path.Join(g.outPkgBase, "internal"), "Parser"), + OpenAPIType: g.openAPIType, + } + + g.generateStruct(sw, typeParams) + + if typeParams.Tags.GenerateClient { + if typeParams.Tags.NonNamespaced { + sw.Do(clientgenTypeConstructorNonNamespaced, typeParams) + } else { + sw.Do(clientgenTypeConstructorNamespaced, typeParams) + } + if typeParams.OpenAPIType != nil { + g.generateClientgenExtract(sw, typeParams, !typeParams.Tags.NoStatus) + } + } else { + if hasTypeMetaField(t) { + sw.Do(constructorWithTypeMeta, typeParams) + } else { + sw.Do(constructor, typeParams) + } + } + g.generateWithFuncs(t, typeParams, sw, nil) + g.generateGetters(t, typeParams, sw, nil) + return sw.Error() +} + +func hasTypeMetaField(t *types.Type) bool { + for _, member := range t.Members { + if typeMeta.Name == member.Type.Name && member.Embedded { + return true + } + } + return false +} + +func blocklisted(t *types.Type, member types.Member) bool { + if objectMeta.Name == t.Name && member.Name == "ManagedFields" { + return true + } + if objectMeta.Name == t.Name && member.Name == "SelfLink" { + return true + } + // Hide any fields which are en route to deletion. 
+ if strings.HasPrefix(member.Name, "ZZZ_") { + return true + } + return false +} + +func needsGetter(t *types.Type, member types.Member) bool { + // Needed when applying an ApplyConfiguration + return objectMeta.Name == t.Name && member.Name == "Name" +} + +func (g *applyConfigurationGenerator) generateGetters(t *types.Type, typeParams TypeParams, sw *generator.SnippetWriter, embed *memberParams) { + for _, member := range t.Members { + if blocklisted(t, member) { + continue + } + memberType := g.refGraph.applyConfigForType(member.Type) + if g.refGraph.isApplyConfig(member.Type) { + memberType = &types.Type{Kind: types.Pointer, Elem: memberType} + } + if jsonTags, ok := lookupJSONTags(member); ok { + memberParams := memberParams{ + TypeParams: typeParams, + Member: member, + MemberType: memberType, + JSONTags: jsonTags, + EmbeddedIn: embed, + } + if memberParams.Member.Embedded { + g.generateGetters(member.Type, typeParams, sw, &memberParams) + continue + } + + if needsGetter(t, member) { + g.generateMemberGetter(sw, memberParams) + } + } + } +} + +func (g *applyConfigurationGenerator) generateWithFuncs(t *types.Type, typeParams TypeParams, sw *generator.SnippetWriter, embed *memberParams) { + for _, member := range t.Members { + if blocklisted(t, member) { + continue + } + memberType := g.refGraph.applyConfigForType(member.Type) + if g.refGraph.isApplyConfig(member.Type) { + memberType = &types.Type{Kind: types.Pointer, Elem: memberType} + } + if jsonTags, ok := lookupJSONTags(member); ok { + memberParams := memberParams{ + TypeParams: typeParams, + Member: member, + MemberType: memberType, + JSONTags: jsonTags, + EmbeddedIn: embed, + } + if memberParams.Member.Embedded { + g.generateWithFuncs(member.Type, typeParams, sw, &memberParams) + if !jsonTags.inline { + // non-inlined embeds are nillable and need a "ensure exists" utility function + sw.Do(ensureEmbedExists, memberParams) + } + continue + } + + // For slices where the items are generated apply configuration types, accept varargs of + // pointers of the type as "with" function arguments so the "with" function can be used like so: + // WithFoos(Foo().WithName("x"), Foo().WithName("y")) + if t := deref(member.Type); t.Kind == types.Slice && g.refGraph.isApplyConfig(t.Elem) { + memberParams.ArgType = &types.Type{Kind: types.Pointer, Elem: memberType.Elem} + g.generateMemberWithForSlice(sw, member, memberParams) + continue + } + // Note: There are no maps where the values are generated apply configurations (because + // associative lists are used instead). 
So if a type like this is ever introduced, the + // default "with" function generator will produce a working (but not entirely convenient "with" function) + // that would be used like so: + // WithMap(map[string]FooApplyConfiguration{*Foo().WithName("x")}) + + switch memberParams.Member.Type.Kind { + case types.Slice: + memberParams.ArgType = memberType.Elem + g.generateMemberWithForSlice(sw, member, memberParams) + case types.Map: + g.generateMemberWithForMap(sw, memberParams) + default: + g.generateMemberWith(sw, memberParams) + } + } + } +} + +func (g *applyConfigurationGenerator) generateStruct(sw *generator.SnippetWriter, typeParams TypeParams) { + sw.Do("// $.ApplyConfig.ApplyConfiguration|public$ represents a declarative configuration of the $.ApplyConfig.Type|public$ type for use\n", typeParams) + sw.Do("// with apply.\n", typeParams) + sw.Do("type $.ApplyConfig.ApplyConfiguration|public$ struct {\n", typeParams) + for _, structMember := range typeParams.Struct.Members { + if blocklisted(typeParams.Struct, structMember) { + continue + } + if structMemberTags, ok := lookupJSONTags(structMember); ok { + if !structMemberTags.inline { + structMemberTags.omitempty = true + } + params := memberParams{ + TypeParams: typeParams, + Member: structMember, + MemberType: g.refGraph.applyConfigForType(structMember.Type), + JSONTags: structMemberTags, + } + if structMember.Embedded { + if structMemberTags.inline { + sw.Do("$.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } else { + sw.Do("*$.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } + } else if isNillable(structMember.Type) { + sw.Do("$.Member.Name$ $.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } else { + sw.Do("$.Member.Name$ *$.MemberType|raw$ `json:\"$.JSONTags$\"`\n", params) + } + } + } + sw.Do("}\n", typeParams) +} + +func deref(t *types.Type) *types.Type { + for t.Kind == types.Pointer { + t = t.Elem + } + return t +} + +func isNillable(t *types.Type) bool { + return t.Kind == types.Slice || t.Kind == types.Map +} + +func (g *applyConfigurationGenerator) generateMemberWith(sw *generator.SnippetWriter, memberParams memberParams) { + sw.Do("// With$.Member.Name$ sets the $.Member.Name$ field in the declarative configuration to the given value\n", memberParams) + sw.Do("// and returns the receiver, so that objects can be built by chaining \"With\" function invocations.\n", memberParams) + sw.Do("// If called multiple times, the $.Member.Name$ field is set to the value of the last call.\n", memberParams) + sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) With$.Member.Name$(value $.MemberType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) + g.ensureEmbedExistsIfApplicable(sw, memberParams) + if g.refGraph.isApplyConfig(memberParams.Member.Type) || isNillable(memberParams.Member.Type) { + sw.Do("b.$.Member.Name$ = value\n", memberParams) + } else { + sw.Do("b.$.Member.Name$ = &value\n", memberParams) + } + sw.Do(" return b\n", memberParams) + sw.Do("}\n", memberParams) +} + +func (g *applyConfigurationGenerator) generateMemberGetter(sw *generator.SnippetWriter, memberParams memberParams) { + sw.Do("// Get$.Member.Name$ retrieves the value of the $.Member.Name$ field in the declarative configuration.\n", memberParams) + if g.refGraph.isApplyConfig(memberParams.Member.Type) || isNillable(memberParams.Member.Type) { + sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) Get$.Member.Name$() $.MemberType|raw$ {\n", memberParams) + } else { + sw.Do("func (b 
*$.ApplyConfig.ApplyConfiguration|public$) Get$.Member.Name$() *$.MemberType|raw$ {\n", memberParams) + } + g.ensureEmbedExistsIfApplicable(sw, memberParams) + sw.Do(" return b.$.Member.Name$\n", memberParams) + sw.Do("}\n", memberParams) +} + +func (g *applyConfigurationGenerator) generateMemberWithForSlice(sw *generator.SnippetWriter, member types.Member, memberParams memberParams) { + memberIsPointerToSlice := member.Type.Kind == types.Pointer + if memberIsPointerToSlice { + sw.Do(ensureNonEmbedSliceExists, memberParams) + } + + sw.Do("// With$.Member.Name$ adds the given value to the $.Member.Name$ field in the declarative configuration\n", memberParams) + sw.Do("// and returns the receiver, so that objects can be build by chaining \"With\" function invocations.\n", memberParams) + sw.Do("// If called multiple times, values provided by each call will be appended to the $.Member.Name$ field.\n", memberParams) + sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) With$.Member.Name$(values ...$.ArgType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) + g.ensureEmbedExistsIfApplicable(sw, memberParams) + + if memberIsPointerToSlice { + sw.Do("b.ensure$.MemberType.Elem|public$Exists()\n", memberParams) + } + + sw.Do(" for i := range values {\n", memberParams) + if memberParams.ArgType.Kind == types.Pointer { + sw.Do("if values[i] == nil {\n", memberParams) + sw.Do(" panic(\"nil value passed to With$.Member.Name$\")\n", memberParams) + sw.Do("}\n", memberParams) + + if memberIsPointerToSlice { + sw.Do("*b.$.Member.Name$ = append(*b.$.Member.Name$, *values[i])\n", memberParams) + } else { + sw.Do("b.$.Member.Name$ = append(b.$.Member.Name$, *values[i])\n", memberParams) + } + } else { + if memberIsPointerToSlice { + sw.Do("*b.$.Member.Name$ = append(*b.$.Member.Name$, values[i])\n", memberParams) + } else { + sw.Do("b.$.Member.Name$ = append(b.$.Member.Name$, values[i])\n", memberParams) + } + } + sw.Do(" }\n", memberParams) + sw.Do(" return b\n", memberParams) + sw.Do("}\n", memberParams) +} + +func (g *applyConfigurationGenerator) generateMemberWithForMap(sw *generator.SnippetWriter, memberParams memberParams) { + sw.Do("// With$.Member.Name$ puts the entries into the $.Member.Name$ field in the declarative configuration\n", memberParams) + sw.Do("// and returns the receiver, so that objects can be build by chaining \"With\" function invocations.\n", memberParams) + sw.Do("// If called multiple times, the entries provided by each call will be put on the $.Member.Name$ field,\n", memberParams) + sw.Do("// overwriting an existing map entries in $.Member.Name$ field with the same key.\n", memberParams) + sw.Do("func (b *$.ApplyConfig.ApplyConfiguration|public$) With$.Member.Name$(entries $.MemberType|raw$) *$.ApplyConfig.ApplyConfiguration|public$ {\n", memberParams) + g.ensureEmbedExistsIfApplicable(sw, memberParams) + sw.Do(" if b.$.Member.Name$ == nil && len(entries) > 0 {\n", memberParams) + sw.Do(" b.$.Member.Name$ = make($.MemberType|raw$, len(entries))\n", memberParams) + sw.Do(" }\n", memberParams) + sw.Do(" for k, v := range entries {\n", memberParams) + sw.Do(" b.$.Member.Name$[k] = v\n", memberParams) + sw.Do(" }\n", memberParams) + sw.Do(" return b\n", memberParams) + sw.Do("}\n", memberParams) +} + +func (g *applyConfigurationGenerator) ensureEmbedExistsIfApplicable(sw *generator.SnippetWriter, memberParams memberParams) { + // Embedded types that are not inlined must be nillable so they are not included in the apply configuration + // when all their 
fields are omitted. + if memberParams.EmbeddedIn != nil && !memberParams.EmbeddedIn.JSONTags.inline { + sw.Do("b.ensure$.MemberType.Elem|public$Exists()\n", memberParams.EmbeddedIn) + } +} + +var ensureEmbedExists = ` +func (b *$.ApplyConfig.ApplyConfiguration|public$) ensure$.MemberType.Elem|public$Exists() { + if b.$.MemberType.Elem|public$ == nil { + b.$.MemberType.Elem|public$ = &$.MemberType.Elem|raw${} + } +} +` + +var ensureNonEmbedSliceExists = ` +func (b *$.ApplyConfig.ApplyConfiguration|public$) ensure$.MemberType.Elem|public$Exists() { + if b.$.Member.Name$ == nil { + b.$.Member.Name$ = &[]$.MemberType.Elem|raw${} + } +} +` + +var clientgenTypeConstructorNamespaced = ` +// $.ApplyConfig.Type|public$ constructs a declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$(name, namespace string) *$.ApplyConfig.ApplyConfiguration|public$ { + b := &$.ApplyConfig.ApplyConfiguration|public${} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b +} +` + +var clientgenTypeConstructorNonNamespaced = ` +// $.ApplyConfig.Type|public$ constructs a declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$(name string) *$.ApplyConfig.ApplyConfiguration|public$ { + b := &$.ApplyConfig.ApplyConfiguration|public${} + b.WithName(name) + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b +} +` + +var constructorWithTypeMeta = ` +// $.ApplyConfig.ApplyConfiguration|public$ constructs a declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$() *$.ApplyConfig.ApplyConfiguration|public$ { + b := &$.ApplyConfig.ApplyConfiguration|public${} + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b +} +` + +var constructor = ` +// $.ApplyConfig.ApplyConfiguration|public$ constructs a declarative configuration of the $.ApplyConfig.Type|public$ type for use with +// apply. +func $.ApplyConfig.Type|public$() *$.ApplyConfig.ApplyConfiguration|public$ { + return &$.ApplyConfig.ApplyConfiguration|public${} +} +` + +func (g *applyConfigurationGenerator) generateClientgenExtract(sw *generator.SnippetWriter, typeParams TypeParams, includeStatus bool) { + sw.Do(` +// Extract$.ApplyConfig.Type|public$ extracts the applied configuration owned by fieldManager from +// $.Struct|private$. If no managedFields are found in $.Struct|private$ for fieldManager, a +// $.ApplyConfig.ApplyConfiguration|public$ is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// $.Struct|private$ must be a unmodified $.Struct|public$ API object that was retrieved from the Kubernetes API. +// Extract$.ApplyConfig.Type|public$ provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! 
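To ground the templates above: the generated constructor pins Kind and APIVersion (plus name and namespace for namespaced types), and the With* functions populate only the fields a caller touches, which is what server-side apply needs to send. A sketch using the pre-generated apps/v1 Deployment apply configuration shipped in client-go:

package main

import (
	"encoding/json"
	"fmt"

	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
)

func main() {
	// Built via the namespaced constructor and With* chain; only the fields
	// that were explicitly set survive serialization.
	dep := appsv1ac.Deployment("demo", "default").
		WithSpec(appsv1ac.DeploymentSpec().WithReplicas(3))

	out, _ := json.Marshal(dep)
	fmt.Println(string(out))
}

The Extract* functions emitted by generateClientgenExtract cover the opposite direction, recovering the fields a given fieldManager owns from a live object.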
+func Extract$.ApplyConfig.Type|public$($.Struct|private$ *$.Struct|raw$, fieldManager string) (*$.ApplyConfig.ApplyConfiguration|public$, error) { + return extract$.ApplyConfig.Type|public$($.Struct|private$, fieldManager, "") +}`, typeParams) + if includeStatus { + sw.Do(` +// Extract$.ApplyConfig.Type|public$Status is the same as Extract$.ApplyConfig.Type|public$ except +// that it extracts the status subresource applied configuration. +// Experimental! +func Extract$.ApplyConfig.Type|public$Status($.Struct|private$ *$.Struct|raw$, fieldManager string) (*$.ApplyConfig.ApplyConfiguration|public$, error) { + return extract$.ApplyConfig.Type|public$($.Struct|private$, fieldManager, "status") +} +`, typeParams) + } + sw.Do(` +func extract$.ApplyConfig.Type|public$($.Struct|private$ *$.Struct|raw$, fieldManager string, subresource string) (*$.ApplyConfig.ApplyConfiguration|public$, error) { + b := &$.ApplyConfig.ApplyConfiguration|public${} + err := $.ExtractInto|raw$($.Struct|private$, $.ParserFunc|raw$().Type("$.OpenAPIType$"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName($.Struct|private$.Name) +`, typeParams) + if !typeParams.Tags.NonNamespaced { + sw.Do("b.WithNamespace($.Struct|private$.Namespace)\n", typeParams) + } + sw.Do(` + b.WithKind("$.ApplyConfig.Type|singularKind$") + b.WithAPIVersion("$.APIVersion$") + return b, nil +} +`, typeParams) +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go new file mode 100644 index 00000000..abd65010 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/internal.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + + "gopkg.in/yaml.v2" + + "k8s.io/kube-openapi/pkg/schemaconv" + + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" +) + +// utilGenerator generates the ForKind() utility function. 
+type internalGenerator struct { + generator.GoGenerator + outputPackage string + imports namer.ImportTracker + typeModels *typeModels + filtered bool +} + +var _ generator.Generator = &internalGenerator{} + +func (g *internalGenerator) Filter(*generator.Context, *types.Type) bool { + // generate file exactly once + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *internalGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "singularKind": namer.NewPublicNamer(0), + } +} + +func (g *internalGenerator) Imports(*generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +func (g *internalGenerator) GenerateType(c *generator.Context, _ *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + schema, err := schemaconv.ToSchema(g.typeModels.models) + if err != nil { + return err + } + schemaYAML, err := yaml.Marshal(schema) + if err != nil { + return err + } + sw.Do(schemaBlock, map[string]interface{}{ + "schemaYAML": string(schemaYAML), + "smdParser": smdParser, + "smdNewParser": smdNewParser, + "yamlObject": yamlObject, + "yamlUnmarshal": yamlUnmarshal, + }) + + return sw.Error() +} + +var schemaBlock = ` +func Parser() *{{.smdParser|raw}} { + parserOnce.Do(func() { + var err error + parser, err = {{.smdNewParser|raw}}(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *{{.smdParser|raw}} +var schemaYAML = {{.yamlObject|raw}}(` + "`{{.schemaYAML}}`" + `) +` diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go new file mode 100644 index 00000000..c11d05de --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/jsontagutil.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "reflect" + "strings" + + "k8s.io/gengo/v2/types" +) + +// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236 +// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go + +// JSONTags represents a go json field tag. 
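The JSON tag handling that follows mirrors encoding/json: the tag name is everything before the first comma, the options are matched against the remainder, and inline versus omitempty drives how the generated struct fields are emitted. A standalone sketch of the same split (reimplemented here for illustration; the generator's helpers are unexported):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// parseTag mirrors the generator's helper: name before the first comma,
// comma-separated options after it.
func parseTag(tag string) (string, []string) {
	parts := strings.Split(tag, ",")
	return parts[0], parts[1:]
}

func main() {
	field := reflect.StructTag(`json:"replicas,omitempty"`)
	name, opts := parseTag(field.Get("json"))
	fmt.Println(name, opts) // replicas [omitempty]

	// An empty or "-" json tag means the member is skipped entirely,
	// matching the early return in lookupJSONTags.
	fmt.Println(reflect.StructTag(`json:"-"`).Get("json")) // -
}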
+type JSONTags struct { + name string + omit bool + inline bool + omitempty bool +} + +func (t JSONTags) String() string { + var tag string + if !t.inline { + tag += t.name + } + if t.omitempty { + tag += ",omitempty" + } + if t.inline { + tag += ",inline" + } + return tag +} + +func lookupJSONTags(m types.Member) (JSONTags, bool) { + tag := reflect.StructTag(m.Tags).Get("json") + if tag == "" || tag == "-" { + return JSONTags{}, false + } + name, opts := parseTag(tag) + if name == "" { + name = m.Name + } + return JSONTags{ + name: name, + omit: false, + inline: opts.Contains("inline"), + omitempty: opts.Contains("omitempty"), + }, true +} + +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, "" +} + +// Contains reports whether a comma-separated listAlias of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go new file mode 100644 index 00000000..67441360 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/openapi.go @@ -0,0 +1,192 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + openapiv2 "github.com/google/gnostic-models/openapiv2" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + "k8s.io/gengo/v2/types" + utilproto "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type typeModels struct { + models utilproto.Models + gvkToOpenAPIType map[clientgentypes.GroupVersionKind]string +} + +func newTypeModels(openAPISchemaFilePath string, pkgTypes map[string]*types.Package) (*typeModels, error) { + if len(openAPISchemaFilePath) == 0 { + return emptyModels, nil // No Extract() functions will be generated. + } + + rawOpenAPISchema, err := os.ReadFile(openAPISchemaFilePath) + if err != nil { + return nil, fmt.Errorf("failed to read openapi-schema file: %w", err) + } + + // Read in the provided openAPI schema. + openAPISchema := &spec.Swagger{} + err = json.Unmarshal(rawOpenAPISchema, openAPISchema) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal typeModels JSON: %w", err) + } + + // Build a mapping from openAPI type name to GVK. + // Find the root types needed by by client-go for apply. 
+ gvkToOpenAPIType := map[clientgentypes.GroupVersionKind]string{} + rootDefs := map[string]spec.Schema{} + for _, p := range pkgTypes { + gv := groupVersion(p) + for _, t := range p.Types { + tags := genclientTags(t) + hasApply := tags.HasVerb("apply") || tags.HasVerb("applyStatus") + if tags.GenerateClient && hasApply { + openAPIType := friendlyName(typeName(t)) + gvk := gv.WithKind(clientgentypes.Kind(t.Name.Name)) + rootDefs[openAPIType] = openAPISchema.Definitions[openAPIType] + gvkToOpenAPIType[gvk] = openAPIType + } + } + } + + // Trim the schema down to just the types needed by client-go for apply. + requiredDefs := make(map[string]spec.Schema) + for name, def := range rootDefs { + requiredDefs[name] = def + findReferenced(&def, openAPISchema.Definitions, requiredDefs) + } + openAPISchema.Definitions = requiredDefs + + // Convert the openAPI schema to the models format and validate it. + models, err := toValidatedModels(openAPISchema) + if err != nil { + return nil, err + } + return &typeModels{models: models, gvkToOpenAPIType: gvkToOpenAPIType}, nil +} + +var emptyModels = &typeModels{ + models: &utilproto.Definitions{}, + gvkToOpenAPIType: map[clientgentypes.GroupVersionKind]string{}, +} + +func toValidatedModels(openAPISchema *spec.Swagger) (utilproto.Models, error) { + // openapi_v2.ParseDocument only accepts a []byte of the JSON or YAML file to be parsed. + // so we do an inefficient marshal back to json and then read it back in as yaml + // but get the benefit of running the models through utilproto.NewOpenAPIData to + // validate all the references between types + rawMinimalOpenAPISchema, err := json.Marshal(openAPISchema) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal openAPI as JSON: %w", err) + } + + document, err := openapiv2.ParseDocument(rawMinimalOpenAPISchema) + if err != nil { + return nil, fmt.Errorf("failed to parse OpenAPI document for file: %w", err) + } + // Construct the models and validate all references are valid. + models, err := utilproto.NewOpenAPIData(document) + if err != nil { + return nil, fmt.Errorf("failed to create OpenAPI models for file: %w", err) + } + return models, nil +} + +// findReferenced recursively finds all schemas referenced from the given def. +// toValidatedModels makes sure no references get missed. 
+func findReferenced(def *spec.Schema, allSchemas, referencedOut map[string]spec.Schema) { + // follow $ref, if any + refPtr := def.Ref.GetPointer() + if refPtr != nil && !refPtr.IsEmpty() { + name := refPtr.String() + if !strings.HasPrefix(name, "/definitions/") { + return + } + name = strings.TrimPrefix(name, "/definitions/") + schema, ok := allSchemas[name] + if !ok { + panic(fmt.Sprintf("allSchemas schema is missing referenced type: %s", name)) + } + if _, ok := referencedOut[name]; !ok { + referencedOut[name] = schema + findReferenced(&schema, allSchemas, referencedOut) + } + } + + // follow any nested schemas + if def.Items != nil { + if def.Items.Schema != nil { + findReferenced(def.Items.Schema, allSchemas, referencedOut) + } + for _, item := range def.Items.Schemas { + findReferenced(&item, allSchemas, referencedOut) + } + } + if def.AllOf != nil { + for _, s := range def.AllOf { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.AnyOf != nil { + for _, s := range def.AnyOf { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.OneOf != nil { + for _, s := range def.OneOf { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.Not != nil { + findReferenced(def.Not, allSchemas, referencedOut) + } + if def.Properties != nil { + for _, prop := range def.Properties { + findReferenced(&prop, allSchemas, referencedOut) + } + } + if def.AdditionalProperties != nil && def.AdditionalProperties.Schema != nil { + findReferenced(def.AdditionalProperties.Schema, allSchemas, referencedOut) + } + if def.PatternProperties != nil { + for _, s := range def.PatternProperties { + findReferenced(&s, allSchemas, referencedOut) + } + } + if def.Dependencies != nil { + for _, d := range def.Dependencies { + if d.Schema != nil { + findReferenced(d.Schema, allSchemas, referencedOut) + } + } + } + if def.AdditionalItems != nil && def.AdditionalItems.Schema != nil { + findReferenced(def.AdditionalItems.Schema, allSchemas, referencedOut) + } + if def.Definitions != nil { + for _, s := range def.Definitions { + findReferenced(&s, allSchemas, referencedOut) + } + } +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go new file mode 100644 index 00000000..8b467552 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/refgraph.go @@ -0,0 +1,175 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "k8s.io/gengo/v2/types" + + "k8s.io/code-generator/cmd/client-gen/generators/util" +) + +// refGraph maps existing types to the package the corresponding applyConfig types will be generated in +// so that references between apply configurations can be correctly generated. +type refGraph map[types.Name]string + +// refGraphForReachableTypes returns a refGraph that contains all reachable types from +// the root clientgen types of the provided packages. 
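Both findReferenced above and findReachableTypes further down are instances of one pattern: a depth-first walk that computes the transitive closure of a reference graph from a set of roots, so that everything unreachable can be trimmed. A minimal standalone sketch of that pattern over a hypothetical, string-keyed graph (standard library only):

package main

import "fmt"

// reachable records every node transitively referenced from root, including
// root itself; visited doubles as the result set, mirroring how the generator
// threads its referencedOut / reachableTypes maps through the recursion.
func reachable(root string, edges map[string][]string, visited map[string]bool) {
	if visited[root] {
		return
	}
	visited[root] = true
	for _, next := range edges[root] {
		reachable(next, edges, visited)
	}
}

func main() {
	// Hypothetical schema-style reference graph.
	edges := map[string][]string{
		"Deployment":      {"DeploymentSpec", "ObjectMeta"},
		"DeploymentSpec":  {"PodTemplateSpec"},
		"PodTemplateSpec": {"ObjectMeta"},
		"Unrelated":       {"SomethingElse"},
	}
	visited := map[string]bool{}
	reachable("Deployment", edges, visited)
	fmt.Println(visited) // "Unrelated" and "SomethingElse" are never visited, so they get trimmed
}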
+func refGraphForReachableTypes(universe types.Universe, pkgTypes map[string]*types.Package, initialTypes map[types.Name]string) refGraph { + var refs refGraph = initialTypes + + // Include only types that are reachable from the root clientgen types. + // We don't want to generate apply configurations for types that are not reachable from a root + // clientgen type. + reachableTypes := map[types.Name]*types.Type{} + for _, p := range pkgTypes { + for _, t := range p.Types { + tags := genclientTags(t) + hasApply := tags.HasVerb("apply") || tags.HasVerb("applyStatus") + if tags.GenerateClient && hasApply { + findReachableTypes(t, reachableTypes) + } + // If any apply extensions have custom inputs, add them. + for _, extension := range tags.Extensions { + if extension.HasVerb("apply") { + if len(extension.InputTypeOverride) > 0 { + inputType := *t + if name, pkg := extension.Input(); len(pkg) > 0 { + inputType = *(universe.Type(types.Name{Package: pkg, Name: name})) + } else { + inputType.Name.Name = extension.InputTypeOverride + } + findReachableTypes(&inputType, reachableTypes) + } + } + } + } + } + for pkg, p := range pkgTypes { + for _, t := range p.Types { + if _, ok := reachableTypes[t.Name]; !ok { + continue + } + if requiresApplyConfiguration(t) { + refs[t.Name] = pkg + } + } + } + + return refs +} + +// applyConfigForType find the type used in the generate apply configurations for a field. +// This may either be an existing type or one of the other generated applyConfig types. +func (t refGraph) applyConfigForType(field *types.Type) *types.Type { + switch field.Kind { + case types.Struct: + if pkg, ok := t[field.Name]; ok { // TODO(jpbetz): Refs to types defined in a separate system (e.g. TypeMeta if generating a 3rd party controller) end up referencing the go struct, not the apply configuration type + return types.Ref(pkg, field.Name.Name+ApplyConfigurationTypeSuffix) + } + return field + case types.Map: + if _, ok := t[field.Elem.Name]; ok { + return &types.Type{ + Kind: types.Map, + Elem: t.applyConfigForType(field.Elem), + Key: t.applyConfigForType(field.Key), + } + } + return field + case types.Slice: + if _, ok := t[field.Elem.Name]; ok { + return &types.Type{ + Kind: types.Slice, + Elem: t.applyConfigForType(field.Elem), + } + } + return field + case types.Pointer: + return t.applyConfigForType(field.Elem) + default: + return field + } +} + +func (t refGraph) isApplyConfig(field *types.Type) bool { + switch field.Kind { + case types.Struct: + _, ok := t[field.Name] + return ok + case types.Pointer: + return t.isApplyConfig(field.Elem) + } + return false +} + +// genclientTags returns the genclient Tags for the given type. +func genclientTags(t *types.Type) util.Tags { + return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) +} + +// findReachableTypes finds all types transitively reachable from a given root type, including +// the root type itself. +func findReachableTypes(t *types.Type, referencedTypes map[types.Name]*types.Type) { + if _, ok := referencedTypes[t.Name]; ok { + return + } + referencedTypes[t.Name] = t + + if t.Elem != nil { + findReachableTypes(t.Elem, referencedTypes) + } + if t.Underlying != nil { + findReachableTypes(t.Underlying, referencedTypes) + } + if t.Key != nil { + findReachableTypes(t.Key, referencedTypes) + } + for _, m := range t.Members { + findReachableTypes(m.Type, referencedTypes) + } +} + +// excludeTypes contains well known types that we do not generate apply configurations for. 
+// Hard coding because we only have two, very specific types that serve a special purpose +// in the type system here. +var excludeTypes = map[types.Name]struct{}{ + rawExtension.Name: {}, + unknown.Name: {}, + // DO NOT ADD TO THIS LIST. If we need to exclude other types, we should consider allowing the + // go type declarations to be annotated as excluded from this generator. +} + +// requiresApplyConfiguration returns true if a type applyConfig should be generated for the given type. +// types applyConfig are only generated for struct types that contain fields with json tags. +func requiresApplyConfiguration(t *types.Type) bool { + for t.Kind == types.Alias { + t = t.Underlying + } + if t.Kind != types.Struct { + return false + } + if _, ok := excludeTypes[t.Name]; ok { + return false + } + var hasJSONTaggedMembers bool + for _, member := range t.Members { + if _, ok := lookupJSONTags(member); ok { + hasJSONTaggedMembers = true + } + } + return hasJSONTaggedMembers +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go new file mode 100644 index 00000000..197e9325 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/targets.go @@ -0,0 +1,314 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "fmt" + "path" + "path/filepath" + "sort" + "strings" + + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" + "k8s.io/klog/v2" + + "k8s.io/code-generator/cmd/applyconfiguration-gen/args" + "k8s.io/code-generator/cmd/client-gen/generators/util" + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" +) + +const ( + // ApplyConfigurationTypeSuffix is the suffix of generated apply configuration types. + ApplyConfigurationTypeSuffix = "ApplyConfiguration" +) + +// NameSystems returns the name system used by the generators in this package. +func NameSystems() namer.NameSystems { + return namer.NameSystems{ + "public": namer.NewPublicNamer(0), + "private": namer.NewPrivateNamer(0), + "raw": namer.NewRawNamer("", nil), + } +} + +// DefaultNameSystem returns the default name system for ordering the types to be +// processed by the generators in this package. +func DefaultNameSystem() string { + return "public" +} + +// GetTargets makes the client target definition. 
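As a concrete reading of requiresApplyConfiguration above: only struct types with at least one json-tagged member (and not on the exclude list) get an ...ApplyConfiguration builder. A hypothetical pair of declarations, annotated with how that rule would classify them:

package main

// FooSpec has json-tagged members, so an apply configuration
// (FooSpecApplyConfiguration) would be generated for it.
type FooSpec struct {
	Replicas *int32 `json:"replicas,omitempty"`
	Paused   bool   `json:"paused,omitempty"`
}

// untaggedHelper has no json-tagged members, so requiresApplyConfiguration
// returns false and no builder type is emitted for it.
type untaggedHelper struct {
	scratch map[string]string
}

func main() {} // the declarations above exist only for illustration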
+func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, "", gengo.StdGeneratedBy) + if err != nil { + klog.Fatalf("Failed loading boilerplate: %v", err) + } + + pkgTypes := packageTypesForInputs(context, args.OutputPkg) + initialTypes := args.ExternalApplyConfigurations + refs := refGraphForReachableTypes(context.Universe, pkgTypes, initialTypes) + typeModels, err := newTypeModels(args.OpenAPISchemaFilePath, pkgTypes) + if err != nil { + klog.Fatalf("Failed build type models from typeModels %s: %v", args.OpenAPISchemaFilePath, err) + } + + groupVersions := make(map[string]clientgentypes.GroupVersions) + groupGoNames := make(map[string]string) + applyConfigsForGroupVersion := make(map[clientgentypes.GroupVersion][]applyConfig) + + var targetList []generator.Target + for pkg, p := range pkgTypes { + gv := groupVersion(p) + + var toGenerate []applyConfig + for _, t := range p.Types { + // If we don't have an ObjectMeta field, we lack the information required to make the Apply or ApplyStatus call + // to the kube-apiserver, so we don't need to generate the type at all + clientTags := genclientTags(t) + if clientTags.GenerateClient && !hasObjectMetaField(t) { + klog.V(5).Infof("skipping type %v because does not have ObjectMeta", t) + continue + } + gvk := gv.WithKind(clientgentypes.Kind(t.Name.Name)) + openAPIName := typeModels.gvkToOpenAPIType[gvk] + + if typePkg, ok := refs[t.Name]; ok { + toGenerate = append(toGenerate, applyConfig{ + Type: t, + ApplyConfiguration: types.Ref(typePkg, t.Name.Name+ApplyConfigurationTypeSuffix), + OpenAPIName: openAPIName, + }) + } + } + if len(toGenerate) == 0 { + continue // Don't generate empty packages + } + sort.Sort(applyConfigSort(toGenerate)) + + // Apparently we allow the groupName to be overridden in a way that it + // no longer maps to a Go package by name. So we have to figure out + // the offset of this particular output package (pkg) from the base + // output package (args.OutputPkg). + pkgSubdir := strings.TrimPrefix(pkg, args.OutputPkg+"/") + + // generate the apply configurations + targetList = append(targetList, + targetForApplyConfigurationsPackage( + args.OutputDir, args.OutputPkg, pkgSubdir, + boilerplate, gv, toGenerate, refs, typeModels)) + + // group all the generated apply configurations by gv so ForKind() can be generated + groupPackageName := gv.Group.NonEmpty() + groupVersionsEntry, ok := groupVersions[groupPackageName] + if !ok { + groupVersionsEntry = clientgentypes.GroupVersions{ + PackageName: groupPackageName, + Group: gv.Group, + } + } + groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, clientgentypes.PackageVersion{ + Version: gv.Version, + Package: path.Clean(p.Path), + }) + + groupGoNames[groupPackageName] = goName(gv, p) + applyConfigsForGroupVersion[gv] = toGenerate + groupVersions[groupPackageName] = groupVersionsEntry + } + + // generate ForKind() utility function + targetList = append(targetList, + targetForUtils(args.OutputDir, args.OutputPkg, + boilerplate, groupVersions, applyConfigsForGroupVersion, groupGoNames, typeModels)) + // generate internal embedded schema, required for generated Extract functions + targetList = append(targetList, + targetForInternal(args.OutputDir, args.OutputPkg, + boilerplate, typeModels)) + + return targetList +} + +func friendlyName(name string) string { + nameParts := strings.Split(name, "/") + // Reverse first part. e.g., io.k8s... instead of k8s.io... 
+ if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") { + parts := strings.Split(nameParts[0], ".") + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + nameParts[0] = strings.Join(parts, ".") + } + return strings.Join(nameParts, ".") +} + +func typeName(t *types.Type) string { + typePackage := t.Name.Package + return fmt.Sprintf("%s.%s", typePackage, t.Name.Name) +} + +func targetForApplyConfigurationsPackage(outputDirBase, outputPkgBase, pkgSubdir string, boilerplate []byte, gv clientgentypes.GroupVersion, typesToGenerate []applyConfig, refs refGraph, models *typeModels) generator.Target { + outputDir := filepath.Join(outputDirBase, pkgSubdir) + outputPkg := path.Join(outputPkgBase, pkgSubdir) + + return &generator.SimpleTarget{ + PkgName: gv.Version.PackageName(), + PkgPath: outputPkg, + PkgDir: outputDir, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { + for _, toGenerate := range typesToGenerate { + var openAPIType *string + gvk := gv.WithKind(clientgentypes.Kind(toGenerate.Type.Name.Name)) + if v, ok := models.gvkToOpenAPIType[gvk]; ok { + openAPIType = &v + } + + generators = append(generators, &applyConfigurationGenerator{ + GoGenerator: generator.GoGenerator{ + OutputFilename: strings.ToLower(toGenerate.Type.Name.Name) + ".go", + }, + outPkgBase: outputPkgBase, + localPkg: outputPkg, + groupVersion: gv, + applyConfig: toGenerate, + imports: generator.NewImportTracker(), + refGraph: refs, + openAPIType: openAPIType, + }) + } + return generators + }, + } +} + +func targetForUtils(outputDirBase, outputPkgBase string, boilerplate []byte, groupVersions map[string]clientgentypes.GroupVersions, + applyConfigsForGroupVersion map[clientgentypes.GroupVersion][]applyConfig, groupGoNames map[string]string, models *typeModels) generator.Target { + return &generator.SimpleTarget{ + PkgName: path.Base(outputPkgBase), + PkgPath: outputPkgBase, + PkgDir: outputDirBase, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &utilGenerator{ + GoGenerator: generator.GoGenerator{ + OutputFilename: "utils.go", + }, + outputPackage: outputPkgBase, + imports: generator.NewImportTracker(), + groupVersions: groupVersions, + typesForGroupVersion: applyConfigsForGroupVersion, + groupGoNames: groupGoNames, + typeModels: models, + }) + return generators + }, + } +} + +func targetForInternal(outputDirBase, outputPkgBase string, boilerplate []byte, models *typeModels) generator.Target { + outputDir := filepath.Join(outputDirBase, "internal") + outputPkg := path.Join(outputPkgBase, "internal") + return &generator.SimpleTarget{ + PkgName: path.Base(outputPkg), + PkgPath: outputPkg, + PkgDir: outputDir, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { + generators = append(generators, &internalGenerator{ + GoGenerator: generator.GoGenerator{ + OutputFilename: "internal.go", + }, + outputPackage: outputPkgBase, + imports: generator.NewImportTracker(), + typeModels: models, + }) + return generators + }, + } +} + +func goName(gv clientgentypes.GroupVersion, p *types.Package) string { + goName := namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) + if override := gengo.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + goName = namer.IC(override[0]) + } + return goName +} + +func packageTypesForInputs(context 
*generator.Context, outPkgBase string) map[string]*types.Package { + pkgTypes := map[string]*types.Package{} + for _, inputDir := range context.Inputs { + p := context.Universe.Package(inputDir) + internal := isInternalPackage(p) + if internal { + klog.Warningf("Skipping internal package: %s", p.Path) + continue + } + // This is how the client generator finds the package we are creating. It uses the API package name, not the group name. + // This matches the approach of the client-gen, so the two generator can work together. + // For example, if openshift/api/cloudnetwork/v1 contains an apigroup cloud.network.openshift.io, the client-gen + // builds a package called cloudnetwork/v1 to contain it. This change makes the applyconfiguration-gen use the same. + _, gvPackageString := util.ParsePathGroupVersion(p.Path) + pkg := path.Join(outPkgBase, strings.ToLower(gvPackageString)) + pkgTypes[pkg] = p + } + return pkgTypes +} + +func groupVersion(p *types.Package) (gv clientgentypes.GroupVersion) { + parts := strings.Split(p.Path, "/") + gv.Group = clientgentypes.Group(parts[len(parts)-2]) + gv.Version = clientgentypes.Version(parts[len(parts)-1]) + + // If there's a comment of the form "// +groupName=somegroup" or + // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the + // group when generating. + if override := gengo.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + gv.Group = clientgentypes.Group(override[0]) + } + return gv +} + +// isInternalPackage returns true if the package is an internal package +func isInternalPackage(p *types.Package) bool { + for _, t := range p.Types { + for _, member := range t.Members { + if member.Name == "ObjectMeta" { + return isInternal(member) + } + } + } + return false +} + +// isInternal returns true if the tags for a member do not contain a json tag +func isInternal(m types.Member) bool { + _, ok := lookupJSONTags(m) + return !ok +} + +func hasObjectMetaField(t *types.Type) bool { + for _, member := range t.Members { + if objectMeta.Name == member.Type.Name && member.Embedded { + return true + } + } + return false +} diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go new file mode 100644 index 00000000..bc0b54d0 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/types.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package generators + +import "k8s.io/gengo/v2/types" + +var ( + applyConfiguration = types.Ref("k8s.io/apimachinery/pkg/runtime", "ApplyConfiguration") + groupVersionKind = types.Ref("k8s.io/apimachinery/pkg/runtime/schema", "GroupVersionKind") + typeMeta = types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "TypeMeta") + objectMeta = types.Ref("k8s.io/apimachinery/pkg/apis/meta/v1", "ObjectMeta") + rawExtension = types.Ref("k8s.io/apimachinery/pkg/runtime", "RawExtension") + unknown = types.Ref("k8s.io/apimachinery/pkg/runtime", "Unknown") + extractInto = types.Ref("k8s.io/apimachinery/pkg/util/managedfields", "ExtractInto") + runtimeScheme = types.Ref("k8s.io/apimachinery/pkg/runtime", "Scheme") + smdNewParser = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "NewParser") + smdParser = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "Parser") + testingTypeConverter = types.Ref("k8s.io/client-go/testing", "TypeConverter") + yamlObject = types.Ref("sigs.k8s.io/structured-merge-diff/v4/typed", "YAMLObject") + yamlUnmarshal = types.Ref("gopkg.in/yaml.v2", "Unmarshal") +) diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go new file mode 100644 index 00000000..136d9c49 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/generators/util.go @@ -0,0 +1,176 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "io" + "path" + "sort" + "strings" + + clientgentypes "k8s.io/code-generator/cmd/client-gen/types" + + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" +) + +// utilGenerator generates the ForKind() utility function. 
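The internal package emitted by internalGenerator (see the schemaBlock template earlier in this diff) amounts to a lazily initialized, process-wide schema parser guarded by sync.Once. A standalone sketch of that initialization pattern, with a stand-in parse step in place of the real structured-merge-diff typed.NewParser call:

package main

import (
	"fmt"
	"sync"
)

// fakeParser stands in for the smd typed.Parser built from the embedded schema YAML.
type fakeParser struct{ schema string }

func buildParser(schemaYAML string) (*fakeParser, error) {
	// The generated code calls typed.NewParser(schemaYAML) here and panics on error.
	return &fakeParser{schema: schemaYAML}, nil
}

var (
	parserOnce sync.Once
	parser     *fakeParser
)

// Parser mirrors the generated internal.Parser(): parse once, reuse forever.
func Parser() *fakeParser {
	parserOnce.Do(func() {
		var err error
		parser, err = buildParser("types: []") // the real code embeds the trimmed openAPI schema here
		if err != nil {
			panic(fmt.Sprintf("failed to parse schema: %v", err))
		}
	})
	return parser
}

func main() {
	fmt.Println(Parser() == Parser()) // true: a single shared parser instance
}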
+type utilGenerator struct { + generator.GoGenerator + outputPackage string + imports namer.ImportTracker + groupVersions map[string]clientgentypes.GroupVersions + groupGoNames map[string]string + typesForGroupVersion map[clientgentypes.GroupVersion][]applyConfig + filtered bool + typeModels *typeModels +} + +var _ generator.Generator = &utilGenerator{} + +func (g *utilGenerator) Filter(*generator.Context, *types.Type) bool { + // generate file exactly once + if !g.filtered { + g.filtered = true + return true + } + return false +} + +func (g *utilGenerator) Namers(*generator.Context) namer.NameSystems { + return namer.NameSystems{ + "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "singularKind": namer.NewPublicNamer(0), + } +} + +func (g *utilGenerator) Imports(*generator.Context) (imports []string) { + return g.imports.ImportLines() +} + +type group struct { + GroupGoName string + Name string + Versions []*version +} + +type groupSort []group + +func (g groupSort) Len() int { return len(g) } +func (g groupSort) Less(i, j int) bool { + return strings.ToLower(g[i].Name) < strings.ToLower(g[j].Name) +} +func (g groupSort) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +type version struct { + Name string + GoName string + Resources []applyConfig +} + +type versionSort []*version + +func (v versionSort) Len() int { return len(v) } +func (v versionSort) Less(i, j int) bool { + return strings.ToLower(v[i].Name) < strings.ToLower(v[j].Name) +} +func (v versionSort) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +type applyConfig struct { + Type *types.Type + ApplyConfiguration *types.Type + OpenAPIName string +} + +type applyConfigSort []applyConfig + +func (v applyConfigSort) Len() int { return len(v) } +func (v applyConfigSort) Less(i, j int) bool { + return strings.ToLower(v[i].Type.Name.Name) < strings.ToLower(v[j].Type.Name.Name) +} +func (v applyConfigSort) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +func (g *utilGenerator) GenerateType(c *generator.Context, _ *types.Type, w io.Writer) error { + sw := generator.NewSnippetWriter(w, c, "{{", "}}") + + var groups []group + schemeGVs := make(map[*version]*types.Type) + + for groupPackageName, groupVersions := range g.groupVersions { + group := group{ + GroupGoName: g.groupGoNames[groupPackageName], + Name: groupVersions.Group.NonEmpty(), + Versions: []*version{}, + } + for _, v := range groupVersions.Versions { + gv := clientgentypes.GroupVersion{Group: groupVersions.Group, Version: v.Version} + version := &version{ + Name: v.Version.NonEmpty(), + GoName: namer.IC(v.Version.NonEmpty()), + Resources: g.typesForGroupVersion[gv], + } + schemeGVs[version] = c.Universe.Variable(types.Name{ + Package: g.typesForGroupVersion[gv][0].Type.Name.Package, + Name: "SchemeGroupVersion", + }) + group.Versions = append(group.Versions, version) + } + sort.Sort(versionSort(group.Versions)) + groups = append(groups, group) + } + sort.Sort(groupSort(groups)) + + m := map[string]interface{}{ + "applyConfiguration": applyConfiguration, + "groups": groups, + "internalParser": types.Ref(path.Join(g.outputPackage, "internal"), "Parser"), + "runtimeScheme": runtimeScheme, + "schemeGVs": schemeGVs, + "schemaGroupVersionKind": groupVersionKind, + "testingTypeConverter": testingTypeConverter, + } + sw.Do(forKindFunc, m) + sw.Do(typeConverter, m) + + return sw.Error() +} + +var typeConverter = ` +func NewTypeConverter(scheme *{{.runtimeScheme|raw}}) *{{.testingTypeConverter|raw}} { + return &{{.testingTypeConverter|raw}}{Scheme: scheme, TypeResolver: 
{{.internalParser|raw}}()} +} +` + +var forKindFunc = ` +// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no +// apply configuration type exists for the given GroupVersionKind. +func ForKind(kind {{.schemaGroupVersionKind|raw}}) interface{} { + switch kind { + {{range $group := .groups -}}{{$GroupGoName := .GroupGoName -}} + {{range $version := .Versions -}} + // Group={{$group.Name}}, Version={{.Name}} + {{range .Resources -}} + case {{index $.schemeGVs $version|raw}}.WithKind("{{.Type|singularKind}}"): + return &{{.ApplyConfiguration|raw}}{} + {{end}} + {{end}} + {{end -}} + } + return nil +} +` diff --git a/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/main.go b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/main.go new file mode 100644 index 00000000..f6b03f88 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/applyconfiguration-gen/main.go @@ -0,0 +1,60 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// applyconfiguration-gen is a tool for auto-generating apply builder functions. +package main + +import ( + "flag" + + "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/applyconfiguration-gen/args" + "k8s.io/code-generator/cmd/applyconfiguration-gen/generators" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/klog/v2" +) + +func main() { + klog.InitFlags(nil) + args := args.New() + args.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of applyconfiguration-gen + if err := flag.Set("logtostderr", "true"); err != nil { + klog.Fatalf("Error: %v", err) + } + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + + if err := args.Validate(); err != nil { + klog.Fatalf("Error: %v", err) + } + + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + + // Run it. 
+ if err := gengo.Execute( + generators.NameSystems(), + generators.DefaultNameSystem(), + myTargets, + gengo.StdBuildTag, + pflag.Args(), + ); err != nil { + klog.Fatalf("Error: %v", err) + } + klog.V(2).Info("Completed successfully.") +} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS index 0170a84e..967eb2a7 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -1,10 +1,11 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: - - lavalamp - wojtek-t - caesarxuchao reviewers: - - lavalamp - wojtek-t - caesarxuchao + - jpbetz +emeritus_approvers: + - lavalamp diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go index 4460ad26..e0914e69 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/args.go @@ -18,19 +18,22 @@ package args import ( "fmt" - "path" "github.com/spf13/pflag" - "k8s.io/gengo/args" "k8s.io/code-generator/cmd/client-gen/types" - codegenutil "k8s.io/code-generator/pkg/util" ) -var DefaultInputDirs = []string{} +type Args struct { + // The directory for the generated results. + OutputDir string + + // The Go import-path of the generated results. + OutputPkg string + + // The boilerplate header for Go files. + GoHeaderFile string -// CustomArgs is a wrapper for arguments to client-gen. -type CustomArgs struct { // A sorted list of group versions to generate. For each of them the package path is found // in GroupVersionToInputPath. Groups []types.GroupVersions @@ -53,69 +56,75 @@ type CustomArgs struct { // For example 'Endpoints:Endpoints', otherwise the pluralizer will generate 'Endpointes'. PluralExceptions []string - // ApplyConfigurationPackage is the package of apply builders generated by typebuilder-gen. + // ApplyConfigurationPackage is the package of apply builders generated by + // applyconfiguration-gen. // If non-empty, Apply functions are generated for each type and reference the apply builders. // If empty (""), Apply functions are not generated. ApplyConfigurationPackage string } -func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - genericArgs := args.Default().WithoutDefaultFlagParsing() - customArgs := &CustomArgs{ +func New() *Args { + return &Args{ ClientsetName: "internalclientset", ClientsetAPIPath: "/apis", ClientsetOnly: false, FakeClient: true, - PluralExceptions: []string{"Endpoints:Endpoints"}, ApplyConfigurationPackage: "", } - genericArgs.CustomArgs = customArgs - genericArgs.InputDirs = DefaultInputDirs - - if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { - genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/clientset") - } - - return genericArgs, customArgs } -func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) { - gvsBuilder := NewGroupVersionsBuilder(&ca.Groups) - pflag.Var(NewGVPackagesValue(gvsBuilder, nil), "input", "group/versions that client-gen will generate clients for. At most one version per group is allowed. Specified in the format \"group1/version1,group2/version2...\".") - pflag.Var(NewGVTypesValue(&ca.IncludedTypesOverrides, []string{}), "included-types-overrides", "list of group/version/type for which client should be generated. By default, client is generated for all types which have genclient in types.go. This overrides that. 
For each groupVersion in this list, only the types mentioned here will be included. The default check of genclient will be used for other group versions.") - pflag.Var(NewInputBasePathValue(gvsBuilder, inputBase), "input-base", "base path to look for the api group.") - pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", ca.ClientsetName, "the name of the generated clientset package.") - pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", ca.ClientsetAPIPath, "the value of default API HTTP path, starting with / and without trailing /.") - pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", ca.ClientsetOnly, "when set, client-gen only generates the clientset shell, without generating the individual typed clients") - pflag.BoolVar(&ca.FakeClient, "fake-clientset", ca.FakeClient, "when set, client-gen will generate the fake clientset that can be used in tests") - - fs.StringSliceVar(&ca.PluralExceptions, "plural-exceptions", ca.PluralExceptions, "list of comma separated plural exception definitions in Type:PluralizedType form") - fs.StringVar(&ca.ApplyConfigurationPackage, "apply-configuration-package", ca.ApplyConfigurationPackage, "optional package of apply configurations, generated by applyconfiguration-gen, that are required to generate Apply functions for each type in the clientset. By default Apply functions are not generated.") +func (args *Args) AddFlags(fs *pflag.FlagSet, inputBase string) { + gvsBuilder := NewGroupVersionsBuilder(&args.Groups) + fs.StringVar(&args.OutputDir, "output-dir", "", + "the base directory under which to generate results") + fs.StringVar(&args.OutputPkg, "output-pkg", args.OutputPkg, + "the Go import-path of the generated results") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") + fs.Var(NewGVPackagesValue(gvsBuilder, nil), "input", + "group/versions that client-gen will generate clients for. At most one version per group is allowed. Specified in the format \"group1/version1,group2/version2...\".") + fs.Var(NewGVTypesValue(&args.IncludedTypesOverrides, []string{}), "included-types-overrides", + "list of group/version/type for which client should be generated. By default, client is generated for all types which have genclient in types.go. This overrides that. For each groupVersion in this list, only the types mentioned here will be included. 
The default check of genclient will be used for other group versions.") + fs.Var(NewInputBasePathValue(gvsBuilder, inputBase), "input-base", + "base path to look for the api group.") + fs.StringVarP(&args.ClientsetName, "clientset-name", "n", args.ClientsetName, + "the name of the generated clientset package.") + fs.StringVarP(&args.ClientsetAPIPath, "clientset-api-path", "", args.ClientsetAPIPath, + "the value of default API HTTP path, starting with / and without trailing /.") + fs.BoolVar(&args.ClientsetOnly, "clientset-only", args.ClientsetOnly, + "when set, client-gen only generates the clientset shell, without generating the individual typed clients") + fs.BoolVar(&args.FakeClient, "fake-clientset", args.FakeClient, + "when set, client-gen will generate the fake clientset that can be used in tests") + fs.StringSliceVar(&args.PluralExceptions, "plural-exceptions", args.PluralExceptions, + "list of comma separated plural exception definitions in Type:PluralizedType form") + fs.StringVar(&args.ApplyConfigurationPackage, "apply-configuration-package", args.ApplyConfigurationPackage, + "optional package of apply configurations, generated by applyconfiguration-gen, that are required to generate Apply functions for each type in the clientset. By default Apply functions are not generated.") // support old flags - fs.SetNormalizeFunc(mapFlagName("clientset-path", "output-package", fs.GetNormalizeFunc())) + fs.SetNormalizeFunc(mapFlagName("clientset-path", "output-pkg", fs.GetNormalizeFunc())) } -func Validate(genericArgs *args.GeneratorArgs) error { - customArgs := genericArgs.CustomArgs.(*CustomArgs) - - if len(genericArgs.OutputPackagePath) == 0 { - return fmt.Errorf("output package cannot be empty") +func (args *Args) Validate() error { + if len(args.OutputDir) == 0 { + return fmt.Errorf("--output-dir must be specified") + } + if len(args.OutputPkg) == 0 { + return fmt.Errorf("--output-pkg must be specified") } - if len(customArgs.ClientsetName) == 0 { - return fmt.Errorf("clientset name cannot be empty") + if len(args.ClientsetName) == 0 { + return fmt.Errorf("--clientset-name must be specified") } - if len(customArgs.ClientsetAPIPath) == 0 { - return fmt.Errorf("clientset API path cannot be empty") + if len(args.ClientsetAPIPath) == 0 { + return fmt.Errorf("--clientset-api-path cannot be empty") } return nil } // GroupVersionPackages returns a map from GroupVersion to the package with the types.go. 
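The CustomArgs-to-Args migration in this hunk follows a common shape: a plain options struct, an AddFlags method that binds each field to a pflag.FlagSet, and a Validate method checked after parsing. A minimal sketch of that shape with hypothetical, trimmed-down options (only --output-dir and --output-pkg), using the same github.com/spf13/pflag package:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

// options is a hypothetical analogue of the generator Args structs.
type options struct {
	OutputDir string
	OutputPkg string
}

func (o *options) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&o.OutputDir, "output-dir", "", "the base directory under which to generate results")
	fs.StringVar(&o.OutputPkg, "output-pkg", "", "the Go import-path of the generated results")
}

func (o *options) Validate() error {
	if o.OutputDir == "" {
		return fmt.Errorf("--output-dir must be specified")
	}
	if o.OutputPkg == "" {
		return fmt.Errorf("--output-pkg must be specified")
	}
	return nil
}

func main() {
	opts := &options{}
	opts.AddFlags(pflag.CommandLine)
	pflag.Parse()
	if err := opts.Validate(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("generating into %s (%s)\n", opts.OutputDir, opts.OutputPkg)
}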
-func (ca *CustomArgs) GroupVersionPackages() map[types.GroupVersion]string { +func (args *Args) GroupVersionPackages() map[types.GroupVersion]string { res := map[types.GroupVersion]string{} - for _, pkg := range ca.Groups { + for _, pkg := range args.Groups { for _, v := range pkg.Versions { res[types.GroupVersion{Group: pkg.Group, Version: v.Version}] = v.Package } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go index 50d29a95..f5e7f406 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/args/gvpackages.go @@ -129,7 +129,9 @@ func (p *groupVersionsBuilder) update() error { versionPkg := types.PackageVersion{Package: path.Join(p.importBasePath, pth, gv.Group.NonEmpty(), gv.Version.String()), Version: gv.Version} if group, ok := seenGroups[gv.Group]; ok { - seenGroups[gv.Group].Versions = append(group.Versions, versionPkg) + vers := group.Versions + vers = append(vers, versionPkg) + seenGroups[gv.Group].Versions = vers } else { seenGroups[gv.Group] = &types.GroupVersions{ PackageName: gv.Group.NonEmpty(), diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index 6739a3fa..12a4afdb 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -18,21 +18,21 @@ limitations under the License. package generators import ( + "fmt" + "path" "path/filepath" "strings" - clientgenargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/args" "k8s.io/code-generator/cmd/client-gen/generators/fake" "k8s.io/code-generator/cmd/client-gen/generators/scheme" "k8s.io/code-generator/cmd/client-gen/generators/util" - "k8s.io/code-generator/cmd/client-gen/path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" codegennamer "k8s.io/code-generator/pkg/namer" - genutil "k8s.io/code-generator/pkg/util" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) @@ -128,33 +128,35 @@ func DefaultNameSystem() string { return "public" } -func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, apiPath string, srcTreePath string, inputPackage string, applyBuilderPackage string, boilerplate []byte) generator.Package { - groupVersionClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) - return &generator.DefaultPackage{ - PackageName: strings.ToLower(gv.Version.NonEmpty()), - PackagePath: groupVersionClientPackage, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// This package has the automatically generated typed clients. -`), - // GeneratorFunc returns a list of generators. 
Each generator makes a +func targetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetDir, clientsetPkg string, groupPkgName string, groupGoName string, apiPath string, inputPkg string, applyBuilderPkg string, boilerplate []byte) generator.Target { + subdir := []string{"typed", strings.ToLower(groupPkgName), strings.ToLower(gv.Version.NonEmpty())} + gvDir := filepath.Join(clientsetDir, filepath.Join(subdir...)) + gvPkg := path.Join(clientsetPkg, path.Join(subdir...)) + + return &generator.SimpleTarget{ + PkgName: strings.ToLower(gv.Version.NonEmpty()), + PkgPath: gvPkg, + PkgDir: gvDir, + HeaderComment: boilerplate, + PkgDocComment: []byte("// This package has the automatically generated typed clients.\n"), + // GeneratorsFunc returns a list of generators. Each generator makes a // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = []generator.Generator{ // Always generate a "doc.go" file. - generator.DefaultGen{OptionalName: "doc"}, + generator.GoGenerator{OutputFilename: "doc.go"}, } // Since we want a file per type that we generate a client for, we // have to provide a function for this. for _, t := range typeList { generators = append(generators, &genClientForType{ - DefaultGen: generator.DefaultGen{ - OptionalName: strings.ToLower(c.Namers["private"].Name(t)), + GoGenerator: generator.GoGenerator{ + OutputFilename: strings.ToLower(c.Namers["private"].Name(t)) + ".go", }, - outputPackage: groupVersionClientPackage, - inputPackage: inputPackage, - clientsetPackage: clientsetPackage, - applyConfigurationPackage: applyBuilderPackage, + outputPackage: gvPkg, + inputPackage: inputPkg, + clientsetPackage: clientsetPkg, + applyConfigurationPackage: applyBuilderPkg, group: gv.Group.NonEmpty(), version: gv.Version.String(), groupGoName: groupGoName, @@ -164,12 +166,12 @@ func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, cli } generators = append(generators, &genGroup{ - DefaultGen: generator.DefaultGen{ - OptionalName: groupPackageName + "_client", + GoGenerator: generator.GoGenerator{ + OutputFilename: groupPkgName + "_client.go", }, - outputPackage: groupVersionClientPackage, - inputPackage: inputPackage, - clientsetPackage: clientsetPackage, + outputPackage: gvPkg, + inputPackage: inputPkg, + clientsetPackage: clientsetPkg, group: gv.Group.NonEmpty(), version: gv.Version.String(), groupGoName: groupGoName, @@ -178,11 +180,11 @@ func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, cli imports: generator.NewImportTracker(), }) - expansionFileName := "generated_expansion" + expansionFileName := "generated_expansion.go" generators = append(generators, &genExpansion{ - groupPackagePath: filepath.Join(srcTreePath, groupVersionClientPackage), - DefaultGen: generator.DefaultGen{ - OptionalName: expansionFileName, + groupPackagePath: gvDir, + GoGenerator: generator.GoGenerator{ + OutputFilename: expansionFileName, }, types: typeList, }) @@ -195,29 +197,23 @@ func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, cli } } -func packageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { - return &generator.DefaultPackage{ - PackageName: customArgs.ClientsetName, - PackagePath: clientsetPackage, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// 
This package has the automatically generated clientset. -`), - // GeneratorFunc returns a list of generators. Each generator generates a +func targetForClientset(args *args.Args, clientsetDir, clientsetPkg string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Target { + return &generator.SimpleTarget{ + PkgName: args.ClientsetName, + PkgPath: clientsetPkg, + PkgDir: clientsetDir, + HeaderComment: boilerplate, + // GeneratorsFunc returns a list of generators. Each generator generates a // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = []generator.Generator{ - // Always generate a "doc.go" file. - generator.DefaultGen{OptionalName: "doc"}, - &genClientset{ - DefaultGen: generator.DefaultGen{ - OptionalName: "clientset", + GoGenerator: generator.GoGenerator{ + OutputFilename: "clientset.go", }, - groups: customArgs.Groups, + groups: args.Groups, groupGoNames: groupGoNames, - clientsetPackage: clientsetPackage, - outputPackage: customArgs.ClientsetName, + clientsetPackage: clientsetPkg, imports: generator.NewImportTracker(), }, } @@ -226,13 +222,14 @@ func packageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage } } -func packageForScheme(customArgs *clientgenargs.CustomArgs, clientsetPackage string, srcTreePath string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { - schemePackage := filepath.Join(clientsetPackage, "scheme") +func targetForScheme(args *args.Args, clientsetDir, clientsetPkg string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Target { + schemeDir := filepath.Join(clientsetDir, "scheme") + schemePkg := path.Join(clientsetPkg, "scheme") // create runtime.Registry for internal client because it has to know about group versions internalClient := false NextGroup: - for _, group := range customArgs.Groups { + for _, group := range args.Groups { for _, v := range group.Versions { if v.String() == "" { internalClient = true @@ -241,28 +238,27 @@ NextGroup: } } - return &generator.DefaultPackage{ - PackageName: "scheme", - PackagePath: schemePackage, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// This package contains the scheme of the automatically generated clientset. -`), - // GeneratorFunc returns a list of generators. Each generator generates a + return &generator.SimpleTarget{ + PkgName: "scheme", + PkgPath: schemePkg, + PkgDir: schemeDir, + HeaderComment: boilerplate, + PkgDocComment: []byte("// This package contains the scheme of the automatically generated clientset.\n"), + // GeneratorsFunc returns a list of generators. Each generator generates a // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = []generator.Generator{ // Always generate a "doc.go" file. 
- generator.DefaultGen{OptionalName: "doc"}, + generator.GoGenerator{OutputFilename: "doc.go"}, &scheme.GenScheme{ - DefaultGen: generator.DefaultGen{ - OptionalName: "register", + GoGenerator: generator.GoGenerator{ + OutputFilename: "register.go", }, - InputPackages: customArgs.GroupVersionPackages(), - OutputPackage: schemePackage, - OutputPath: filepath.Join(srcTreePath, schemePackage), - Groups: customArgs.Groups, + InputPackages: args.GroupVersionPackages(), + OutputPkg: schemePkg, + OutputPath: schemeDir, + Groups: args.Groups, GroupGoNames: groupGoNames, ImportTracker: generator.NewImportTracker(), CreateRegistry: internalClient, @@ -278,12 +274,12 @@ NextGroup: // first field (somegroup) as the name of the group in Go code, e.g. as the func name in a clientset. // // If the first field of the groupName is not unique within the clientset, use "// +groupName=unique -func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.CustomArgs) { +func applyGroupOverrides(universe types.Universe, args *args.Args) { // Create a map from "old GV" to "new GV" so we know what changes we need to make. changes := make(map[clientgentypes.GroupVersion]clientgentypes.GroupVersion) - for gv, inputDir := range customArgs.GroupVersionPackages() { - p := universe.Package(genutil.Vendorless(inputDir)) - if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + for gv, inputDir := range args.GroupVersionPackages() { + p := universe.Package(inputDir) + if override := gengo.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { newGV := clientgentypes.GroupVersion{ Group: clientgentypes.Group(override[0]), Version: gv.Version, @@ -292,9 +288,9 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust } } - // Modify customArgs.Groups based on the groupName overrides. - newGroups := make([]clientgentypes.GroupVersions, 0, len(customArgs.Groups)) - for _, gvs := range customArgs.Groups { + // Modify args.Groups based on the groupName overrides. + newGroups := make([]clientgentypes.GroupVersions, 0, len(args.Groups)) + for _, gvs := range args.Groups { gv := clientgentypes.GroupVersion{ Group: gvs.Group, Version: gvs.Versions[0].Version, // we only need a version, and the first will do @@ -312,37 +308,64 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust newGroups = append(newGroups, gvs) } } - customArgs.Groups = newGroups + args.Groups = newGroups } -// Packages makes the client package definition. -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +// Because we try to assemble inputs from an input-base and a set of +// group-version arguments, sometimes that comes in as a filesystem path. This +// function rewrites them all as their canonical Go import-paths. +// +// TODO: Change this tool to just take inputs as Go "patterns" like every other +// gengo tool, then extract GVs from those. 
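The +groupName and +groupGoName overrides consumed by applyGroupOverrides above are ordinary "+key=value" comment tags on the package's doc comments. A simplified, standard-library-only model of pulling such tags out of comment lines (the vendored code uses gengo/v2's ExtractCommentTags for this; the package comments below are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// extractTags collects "+key=value" lines from doc comments, a simplified
// stand-in for gengo's ExtractCommentTags("+", comments).
func extractTags(comments []string) map[string][]string {
	tags := map[string][]string{}
	for _, c := range comments {
		c = strings.TrimSpace(c)
		if !strings.HasPrefix(c, "+") {
			continue
		}
		kv := strings.SplitN(strings.TrimPrefix(c, "+"), "=", 2)
		if len(kv) == 2 {
			tags[kv[0]] = append(tags[kv[0]], kv[1])
		}
	}
	return tags
}

func main() {
	comments := []string{
		"Package v1 contains API types.",
		"+groupName=cloud.network.openshift.io",
		"+groupGoName=CloudNetwork",
	}
	tags := extractTags(comments)
	fmt.Println(tags["groupName"], tags["groupGoName"])
}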
+func sanitizePackagePaths(context *generator.Context, args *args.Args) error { + for i := range args.Groups { + pkg := &args.Groups[i] + for j := range pkg.Versions { + ver := &pkg.Versions[j] + input := ver.Package + p := context.Universe[input] + if p == nil || p.Name == "" { + pkgs, err := context.FindPackages(input) + if err != nil { + return fmt.Errorf("can't find input package %q: %w", input, err) + } + p = context.Universe[pkgs[0]] + if p == nil { + return fmt.Errorf("can't find input package %q in universe", input) + } + ver.Package = p.Path + } + } + } + return nil +} + +// GetTargets makes the client target definition. +func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, "", gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs) - if !ok { - klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") - } - includedTypesOverrides := customArgs.IncludedTypesOverrides + includedTypesOverrides := args.IncludedTypesOverrides - applyGroupOverrides(context.Universe, customArgs) + if err := sanitizePackagePaths(context, args); err != nil { + klog.Fatalf("cannot sanitize inputs: %v", err) + } + applyGroupOverrides(context.Universe, args) gvToTypes := map[clientgentypes.GroupVersion][]*types.Type{} groupGoNames := make(map[clientgentypes.GroupVersion]string) - for gv, inputDir := range customArgs.GroupVersionPackages() { - p := context.Universe.Package(path.Vendorless(inputDir)) + for gv, inputDir := range args.GroupVersionPackages() { + p := context.Universe.Package(inputDir) // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as // the Go group identifier in CamelCase. It defaults groupGoNames[gv] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) - if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + if override := gengo.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { groupGoNames[gv] = namer.IC(override[0]) } - // Package are indexed with the vendor prefix stripped for n, t := range p.Types { // filter out types which are not included in user specified overrides. 
typesOverride, ok := includedTypesOverrides[gv] @@ -371,33 +394,43 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } } - var packageList []generator.Package - clientsetPackage := filepath.Join(arguments.OutputPackagePath, customArgs.ClientsetName) + clientsetDir := filepath.Join(args.OutputDir, args.ClientsetName) + clientsetPkg := path.Join(args.OutputPkg, args.ClientsetName) + + var targetList []generator.Target - packageList = append(packageList, packageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate)) - packageList = append(packageList, packageForScheme(customArgs, clientsetPackage, arguments.OutputBase, groupGoNames, boilerplate)) - if customArgs.FakeClient { - packageList = append(packageList, fake.PackageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate)) + targetList = append(targetList, + targetForClientset(args, clientsetDir, clientsetPkg, groupGoNames, boilerplate)) + targetList = append(targetList, + targetForScheme(args, clientsetDir, clientsetPkg, groupGoNames, boilerplate)) + if args.FakeClient { + targetList = append(targetList, + fake.TargetForClientset(args, clientsetDir, clientsetPkg, args.ApplyConfigurationPackage, groupGoNames, boilerplate)) } // If --clientset-only=true, we don't regenerate the individual typed clients. - if customArgs.ClientsetOnly { - return generator.Packages(packageList) + if args.ClientsetOnly { + return []generator.Target(targetList) } orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} - gvPackages := customArgs.GroupVersionPackages() - for _, group := range customArgs.Groups { + gvPackages := args.GroupVersionPackages() + for _, group := range args.Groups { for _, version := range group.Versions { gv := clientgentypes.GroupVersion{Group: group.Group, Version: version.Version} types := gvToTypes[gv] inputPath := gvPackages[gv] - packageList = append(packageList, packageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], customArgs.ClientsetAPIPath, arguments.OutputBase, inputPath, customArgs.ApplyConfigurationPackage, boilerplate)) - if customArgs.FakeClient { - packageList = append(packageList, fake.PackageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], inputPath, customArgs.ApplyConfigurationPackage, boilerplate)) + targetList = append(targetList, + targetForGroup( + gv, orderer.OrderTypes(types), clientsetDir, clientsetPkg, + group.PackageName, groupGoNames[gv], args.ClientsetAPIPath, + inputPath, args.ApplyConfigurationPackage, boilerplate)) + if args.FakeClient { + targetList = append(targetList, + fake.TargetForGroup(gv, orderer.OrderTypes(types), clientsetDir, clientsetPkg, group.PackageName, groupGoNames[gv], inputPath, args.ApplyConfigurationPackage, boilerplate)) } } } - return generator.Packages(packageList) + return targetList } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go index 17949091..74c98232 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go @@ -17,45 +17,48 @@ limitations under the License. 
package fake import ( + "path" "path/filepath" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/types" - clientgenargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/args" scheme "k8s.io/code-generator/cmd/client-gen/generators/scheme" "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" ) -func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, inputPackage string, applyBuilderPackage string, boilerplate []byte) generator.Package { - outputPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()), "fake") +func TargetForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetDir, clientsetPkg string, groupPkgName string, groupGoName string, inputPkg string, applyBuilderPackage string, boilerplate []byte) generator.Target { // TODO: should make this a function, called by here and in client-generator.go - realClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty())) - return &generator.DefaultPackage{ - PackageName: "fake", - PackagePath: outputPackage, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// Package fake has the automatically generated clients. -`), - // GeneratorFunc returns a list of generators. Each generator makes a + subdir := []string{"typed", strings.ToLower(groupPkgName), strings.ToLower(gv.Version.NonEmpty())} + outputDir := filepath.Join(clientsetDir, filepath.Join(subdir...), "fake") + outputPkg := path.Join(clientsetPkg, path.Join(subdir...), "fake") + realClientPkg := path.Join(clientsetPkg, path.Join(subdir...)) + + return &generator.SimpleTarget{ + PkgName: "fake", + PkgPath: outputPkg, + PkgDir: outputDir, + HeaderComment: boilerplate, + PkgDocComment: []byte("// Package fake has the automatically generated clients.\n"), + // GeneratorsFunc returns a list of generators. Each generator makes a // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = []generator.Generator{ // Always generate a "doc.go" file. - generator.DefaultGen{OptionalName: "doc"}, + generator.GoGenerator{OutputFilename: "doc.go"}, } // Since we want a file per type that we generate a client for, we // have to provide a function for this. 
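// Hedged sketch (schematic, not vendored code): the shape of the gengo/v2 API that the hunks
// above migrate to. A Target describes one output package (name, import path, directory) and
// returns the Generators that emit its files; GoGenerator names the full output file instead of
// DefaultGen's extension-less OptionalName. Identifiers below are hypothetical.
package example

import (
	"k8s.io/gengo/v2/generator"
)

func exampleTarget(boilerplate []byte) generator.Target {
	return &generator.SimpleTarget{
		PkgName:       "sample",
		PkgPath:       "example.com/project/pkg/generated/sample", // Go import path
		PkgDir:        "./pkg/generated/sample",                   // filesystem location
		HeaderComment: boilerplate,
		PkgDocComment: []byte("// Package sample holds generated helpers.\n"),
		GeneratorsFunc: func(c *generator.Context) []generator.Generator {
			return []generator.Generator{
				// One generator per output file; the filename now carries its ".go" suffix.
				generator.GoGenerator{OutputFilename: "doc.go"},
			}
		},
	}
}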
for _, t := range typeList { generators = append(generators, &genFakeForType{ - DefaultGen: generator.DefaultGen{ - OptionalName: "fake_" + strings.ToLower(c.Namers["private"].Name(t)), + GoGenerator: generator.GoGenerator{ + OutputFilename: "fake_" + strings.ToLower(c.Namers["private"].Name(t)) + ".go", }, - outputPackage: outputPackage, - inputPackage: inputPackage, + outputPackage: outputPkg, + inputPackage: inputPkg, group: gv.Group.NonEmpty(), version: gv.Version.String(), groupGoName: groupGoName, @@ -66,11 +69,11 @@ func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, cli } generators = append(generators, &genFakeForGroup{ - DefaultGen: generator.DefaultGen{ - OptionalName: "fake_" + groupPackageName + "_client", + GoGenerator: generator.GoGenerator{ + OutputFilename: "fake_" + groupPkgName + "_client.go", }, - outputPackage: outputPackage, - realClientPackage: realClientPackage, + outputPackage: outputPkg, + realClientPackage: realClientPkg, group: gv.Group.NonEmpty(), version: gv.Version.String(), groupGoName: groupGoName, @@ -85,41 +88,40 @@ func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, cli } } -func PackageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package { - return &generator.DefaultPackage{ +func TargetForClientset(args *args.Args, clientsetDir, clientsetPkg string, applyConfigurationPkg string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Target { + return &generator.SimpleTarget{ // TODO: we'll generate fake clientset for different release in the future. // Package name and path are hard coded for now. - PackageName: "fake", - PackagePath: filepath.Join(clientsetPackage, "fake"), - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// This package has the automatically generated fake clientset. -`), - // GeneratorFunc returns a list of generators. Each generator generates a + PkgName: "fake", + PkgPath: path.Join(clientsetPkg, "fake"), + PkgDir: filepath.Join(clientsetDir, "fake"), + HeaderComment: boilerplate, + PkgDocComment: []byte("// This package has the automatically generated fake clientset.\n"), + // GeneratorsFunc returns a list of generators. Each generator generates a // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = []generator.Generator{ // Always generate a "doc.go" file. 
- generator.DefaultGen{OptionalName: "doc"}, + generator.GoGenerator{OutputFilename: "doc.go"}, &genClientset{ - DefaultGen: generator.DefaultGen{ - OptionalName: "clientset_generated", + GoGenerator: generator.GoGenerator{ + OutputFilename: "clientset_generated.go", }, - groups: customArgs.Groups, - groupGoNames: groupGoNames, - fakeClientsetPackage: clientsetPackage, - outputPackage: "fake", - imports: generator.NewImportTracker(), - realClientsetPackage: clientsetPackage, + groups: args.Groups, + groupGoNames: groupGoNames, + fakeClientsetPackage: clientsetPkg, + imports: generator.NewImportTracker(), + realClientsetPackage: clientsetPkg, + applyConfigurationPackage: applyConfigurationPkg, }, &scheme.GenScheme{ - DefaultGen: generator.DefaultGen{ - OptionalName: "register", + GoGenerator: generator.GoGenerator{ + OutputFilename: "register.go", }, - InputPackages: customArgs.GroupVersionPackages(), - OutputPackage: clientsetPackage, - Groups: customArgs.Groups, + InputPackages: args.GroupVersionPackages(), + OutputPkg: clientsetPkg, + Groups: args.Groups, GroupGoNames: groupGoNames, ImportTracker: generator.NewImportTracker(), PrivateScheme: true, diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go index cd731cb9..3bf7aa12 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go @@ -19,33 +19,33 @@ package fake import ( "fmt" "io" - "path/filepath" + "path" "strings" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) // genClientset generates a package for a clientset. type genClientset struct { - generator.DefaultGen + generator.GoGenerator groups []clientgentypes.GroupVersions groupGoNames map[clientgentypes.GroupVersion]string - fakeClientsetPackage string - outputPackage string + fakeClientsetPackage string // must be a Go import-path imports namer.ImportTracker clientsetGenerated bool // the import path of the generated real clientset. - realClientsetPackage string + realClientsetPackage string // must be a Go import-path + applyConfigurationPackage string } var _ generator.Generator = &genClientset{} func (g *genClientset) Namers(c *generator.Context) namer.NameSystems { return namer.NameSystems{ - "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "raw": namer.NewRawNamer(g.fakeClientsetPackage, g.imports), } } @@ -60,8 +60,8 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) 
for _, group := range g.groups { for _, version := range group.Versions { - groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) - fakeGroupClientPackage := filepath.Join(groupClientPackage, "fake") + groupClientPackage := path.Join(g.fakeClientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + fakeGroupClientPackage := path.Join(groupClientPackage, "fake") groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), groupClientPackage)) @@ -77,12 +77,15 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { "fakediscovery \"k8s.io/client-go/discovery/fake\"", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/watch", + "k8s.io/apimachinery/pkg/api/meta/testrestmapper", ) return } func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { + generateApply := len(g.applyConfigurationPackage) > 0 + // TODO: We actually don't need any type information to generate the clientset, // perhaps we can adapt the go2ild framework to this kind of usage. sw := generator.NewSnippetWriter(w, c, "$", "$") @@ -90,6 +93,13 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames) sw.Do(common, nil) + + if generateApply { + sw.Do(managedFieldsClientset, map[string]any{ + "newTypeConverter": types.Ref(g.applyConfigurationPackage, "NewTypeConverter"), + }) + } + sw.Do(checkImpl, nil) for _, group := range allGroups { @@ -108,11 +118,50 @@ func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Wr } // This part of code is version-independent, unchanging. + +var managedFieldsClientset = ` +// NewClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewClientset(objects ...runtime.Object) *Clientset { + o := testing.NewFieldManagedObjectTracker( + scheme, + codecs.UniversalDecoder(), + $.newTypeConverter|raw$(scheme), + ) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} +` + var common = ` // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. 
+// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go index 8f4d5785..d9c9b8ba 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_group.go @@ -19,21 +19,21 @@ package fake import ( "fmt" "io" - "path/filepath" + "path" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/code-generator/cmd/client-gen/generators/util" ) // genFakeForGroup produces a file for a group client, e.g. ExtensionsClient for the extension group. type genFakeForGroup struct { - generator.DefaultGen - outputPackage string - realClientPackage string + generator.GoGenerator + outputPackage string // must be a Go import-path + realClientPackage string // must be a Go import-path group string version string groupGoName string @@ -64,7 +64,7 @@ func (g *genFakeForGroup) Namers(c *generator.Context) namer.NameSystems { func (g *genFakeForGroup) Imports(c *generator.Context) (imports []string) { imports = g.imports.ImportLines() if len(g.types) != 0 { - imports = append(imports, fmt.Sprintf("%s \"%s\"", strings.ToLower(filepath.Base(g.realClientPackage)), g.realClientPackage)) + imports = append(imports, fmt.Sprintf("%s \"%s\"", strings.ToLower(path.Base(g.realClientPackage)), g.realClientPackage)) } return imports } @@ -90,7 +90,7 @@ func (g *genFakeForGroup) GenerateType(c *generator.Context, t *types.Type, w io "type": t, "GroupGoName": g.groupGoName, "Version": namer.IC(g.version), - "realClientPackage": strings.ToLower(filepath.Base(g.realClientPackage)), + "realClientPackage": strings.ToLower(path.Base(g.realClientPackage)), } if tags.NonNamespaced { sw.Do(getterImplNonNamespaced, wrapper) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index ec40fb4e..20499cf8 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -18,22 +18,23 @@ package fake import ( "io" - gopath "path" - "path/filepath" + "path" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "golang.org/x/text/cases" + "golang.org/x/text/language" + + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/code-generator/cmd/client-gen/generators/util" - "k8s.io/code-generator/cmd/client-gen/path" ) // genFakeForType produces a file for each top-level type. 
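// Hedged usage sketch: what the NewClientset constructor documented above enables for consumers
// of a clientset generated by this vendored code. The import paths, the Widget type, and the
// SampleV1beta1().Widgets() accessors are hypothetical placeholders, not real packages.
package sample_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1beta1 "example.com/project/pkg/apis/sample/v1beta1"          // hypothetical API types
	fake "example.com/project/pkg/client/clientset/versioned/fake" // hypothetical generated fake
)

func TestGetWidget(t *testing.T) {
	seed := &v1beta1.Widget{ObjectMeta: metav1.ObjectMeta{Name: "w1"}}

	// NewClientset backs the fake with a field-managed tracker, so Apply/ApplyStatus behave
	// like server-side apply; NewSimpleClientset keeps the plain tracker and is now deprecated.
	cs := fake.NewClientset(seed)

	got, err := cs.SampleV1beta1().Widgets().Get(context.Background(), "w1", metav1.GetOptions{})
	if err != nil || got.Name != "w1" {
		t.Fatalf("unexpected Get result: %+v, %v", got, err)
	}
}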
type genFakeForType struct { - generator.DefaultGen - outputPackage string + generator.GoGenerator + outputPackage string // Must be a Go import-path group string version string groupGoName string @@ -45,6 +46,8 @@ type genFakeForType struct { var _ generator.Generator = &genFakeForType{} +var titler = cases.Title(language.Und) + // Filter ignores all but one type because we're making a single file per type. func (g *genFakeForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } @@ -78,7 +81,7 @@ func genStatus(t *types.Type) bool { // hasObjectMeta returns true if the type has a ObjectMeta field. func hasObjectMeta(t *types.Type) bool { for _, m := range t.Members { - if m.Embedded == true && m.Name == "ObjectMeta" { + if m.Embedded && m.Name == "ObjectMeta" { return true } } @@ -88,91 +91,71 @@ func hasObjectMeta(t *types.Type) bool { // GenerateType makes the body of a file implementing the individual typed client for type t. func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - pkg := filepath.Base(t.Name.Package) + pkg := path.Base(t.Name.Package) tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) if err != nil { return err } - canonicalGroup := g.group - if canonicalGroup == "core" { - canonicalGroup = "" - } - - groupName := g.group - if g.group == "core" { - groupName = "" - } - - // allow user to define a group name that's different from the one parsed from the directory. - p := c.Universe.Package(path.Vendorless(g.inputPackage)) - if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { - groupName = override[0] - } const pkgClientGoTesting = "k8s.io/client-go/testing" m := map[string]interface{}{ - "type": t, - "inputType": t, - "resultType": t, - "subresourcePath": "", - "package": pkg, - "Package": namer.IC(pkg), - "namespaced": !tags.NonNamespaced, - "Group": namer.IC(g.group), - "GroupGoName": g.groupGoName, - "Version": namer.IC(g.version), - "group": canonicalGroup, - "groupName": groupName, - "version": g.version, - "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), - "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), - "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), - "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), - "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), - "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), - "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), - "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), - "GroupVersionResource": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"}), - "GroupVersionKind": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionKind"}), - "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), - "ApplyPatchType": c.Universe.Type(types.Name{Package: 
"k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), - "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), - "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), - - "NewRootListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootListAction"}), - "NewListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewListAction"}), - "NewRootGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetAction"}), - "NewGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetAction"}), - "NewRootDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteAction"}), - "NewRootDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteActionWithOptions"}), - "NewDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteAction"}), - "NewDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteActionWithOptions"}), - "NewRootDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteCollectionAction"}), - "NewDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteCollectionAction"}), - "NewRootUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateAction"}), - "NewUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateAction"}), - "NewRootCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateAction"}), - "NewCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateAction"}), - "NewRootWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootWatchAction"}), - "NewWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewWatchAction"}), - "NewCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateSubresourceAction"}), - "NewRootCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateSubresourceAction"}), - "NewUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateSubresourceAction"}), - "NewGetSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetSubresourceAction"}), - "NewRootGetSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetSubresourceAction"}), - "NewRootUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateSubresourceAction"}), - "NewRootPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchAction"}), - "NewPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchAction"}), - "NewRootPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchSubresourceAction"}), - "NewPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchSubresourceAction"}), - "ExtractFromListOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "ExtractFromListOptions"}), + "type": t, + "inputType": t, + 
"resultType": t, + "subresourcePath": "", + "package": pkg, + "Package": namer.IC(pkg), + "namespaced": !tags.NonNamespaced, + "Group": namer.IC(g.group), + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "version": g.version, + "SchemeGroupVersion": c.Universe.Type(types.Name{Package: t.Name.Package, Name: "SchemeGroupVersion"}), + "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), + "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), + "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), + "Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + + "NewRootListActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootListActionWithOptions"}), + "NewListActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewListActionWithOptions"}), + "NewRootGetActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetActionWithOptions"}), + "NewGetActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetActionWithOptions"}), + "NewRootDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteActionWithOptions"}), + "NewDeleteActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteActionWithOptions"}), + "NewRootDeleteCollectionActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteCollectionActionWithOptions"}), + "NewDeleteCollectionActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteCollectionActionWithOptions"}), + "NewRootUpdateActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateActionWithOptions"}), + "NewUpdateActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateActionWithOptions"}), + "NewRootCreateActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateActionWithOptions"}), + "NewCreateActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateActionWithOptions"}), + "NewRootWatchActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootWatchActionWithOptions"}), + "NewWatchActionWithOptions": c.Universe.Function(types.Name{Package: 
pkgClientGoTesting, Name: "NewWatchActionWithOptions"}), + "NewCreateSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateSubresourceActionWithOptions"}), + "NewRootCreateSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateSubresourceActionWithOptions"}), + "NewUpdateSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateSubresourceActionWithOptions"}), + "NewGetSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetSubresourceActionWithOptions"}), + "NewRootGetSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetSubresourceActionWithOptions"}), + "NewRootUpdateSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateSubresourceActionWithOptions"}), + "NewRootPatchActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchActionWithOptions"}), + "NewPatchActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchActionWithOptions"}), + "NewRootPatchSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchSubresourceActionWithOptions"}), + "NewPatchSubresourceActionWithOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchSubresourceActionWithOptions"}), + "ExtractFromListOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "ExtractFromListOptions"}), } generateApply := len(g.applyConfigurationPackage) > 0 if generateApply { // Generated apply builder type references required for generated Apply function _, gvString := util.ParsePathGroupVersion(g.inputPackage) - m["inputApplyConfig"] = types.Ref(gopath.Join(g.applyConfigurationPackage, gvString), t.Name.Name+"ApplyConfiguration") + m["inputApplyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, gvString), t.Name.Name+"ApplyConfiguration") } if tags.NonNamespaced { @@ -256,7 +239,7 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. m["resultType"] = &resultType m["subresourcePath"] = e.SubResourcePath if e.HasVerb("apply") { - m["inputApplyConfig"] = types.Ref(gopath.Join(g.applyConfigurationPackage, inputGVString), inputType.Name.Name+"ApplyConfiguration") + m["inputApplyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, inputGVString), inputType.Name.Name+"ApplyConfiguration") } if e.HasVerb("get") { @@ -319,7 +302,7 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io. // TODO: Make the verbs in templates parametrized so the strings.Replace() is // not needed. 
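// Hedged aside (not vendored code): the titler caser declared earlier in this file replaces the
// deprecated strings.Title, and adjustTemplate just below uses it to swap a template's default
// verb name for an extension's custom verb name. Names in main() are illustrative only.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

var titler = cases.Title(language.Und)

// adjustTemplate mirrors the vendored helper: rename " Get", " List", etc. to a custom verb name.
func adjustTemplate(name, verbType, template string) string {
	return strings.ReplaceAll(template, " "+titler.String(verbType), " "+name)
}

func main() {
	fmt.Println(titler.String("update")) // Update
	fmt.Println(adjustTemplate("GetScale", "get", "func (c *FakeThings) Get(ctx ...)"))
	// func (c *FakeThings) GetScale(ctx ...)
}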
func adjustTemplate(name, verbType, template string) string { - return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1) + return strings.ReplaceAll(template, " "+titler.String(verbType), " "+name) } // template for the struct that implements the type's interface @@ -340,21 +323,22 @@ type Fake$.type|publicPlural$ struct { ` var resource = ` -var $.type|allLowercasePlural$Resource = $.GroupVersionResource|raw${Group: "$.groupName$", Version: "$.version$", Resource: "$.type|resource$"} +var $.type|allLowercasePlural$Resource = $.SchemeGroupVersion|raw$.WithResource("$.type|resource$") ` var kind = ` -var $.type|allLowercasePlural$Kind = $.GroupVersionKind|raw${Group: "$.groupName$", Version: "$.version$", Kind: "$.type|singularKind$"} +var $.type|allLowercasePlural$Kind = $.SchemeGroupVersion|raw$.WithKind("$.type|singularKind$") ` var listTemplate = ` // List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. func (c *Fake$.type|publicPlural$) List(ctx context.Context, opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { + emptyResult := &$.type|raw$List{} obj, err := c.Fake. - $if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{}) - $else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$ + $if .namespaced$Invokes($.NewListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), emptyResult) + $else$Invokes($.NewRootListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.type|raw$List), err } @@ -363,11 +347,12 @@ func (c *Fake$.type|publicPlural$) List(ctx context.Context, opts $.ListOptions| var listUsingOptionsTemplate = ` // List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors. func (c *Fake$.type|publicPlural$) List(ctx context.Context, opts $.ListOptions|raw$) (result *$.type|raw$List, err error) { + emptyResult := &$.type|raw$List{} obj, err := c.Fake. - $if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{}) - $else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$ + $if .namespaced$Invokes($.NewListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), emptyResult) + $else$Invokes($.NewRootListActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := $.ExtractFromListOptions|raw$(opts) @@ -387,11 +372,12 @@ func (c *Fake$.type|publicPlural$) List(ctx context.Context, opts $.ListOptions| var getTemplate = ` // Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. func (c *Fake$.type|publicPlural$) Get(ctx context.Context, name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + emptyResult := &$.resultType|raw${} obj, err := c.Fake. 
- $if .namespaced$Invokes($.NewGetAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.resultType|raw${}) - $else$Invokes($.NewRootGetAction|raw$($.type|allLowercasePlural$Resource, name), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewGetActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, name, options), emptyResult) + $else$Invokes($.NewRootGetActionWithOptions|raw$($.type|allLowercasePlural$Resource, name, options), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -400,11 +386,12 @@ func (c *Fake$.type|publicPlural$) Get(ctx context.Context, name string, options var getSubresourceTemplate = ` // Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. func (c *Fake$.type|publicPlural$) Get(ctx context.Context, $.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { + emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${}) - $else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewGetSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name, options), emptyResult) + $else$Invokes($.NewRootGetSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name, options), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -423,8 +410,8 @@ func (c *Fake$.type|publicPlural$) Delete(ctx context.Context, name string, opts var deleteCollectionTemplate = ` // DeleteCollection deletes a collection of objects. func (c *Fake$.type|publicPlural$) DeleteCollection(ctx context.Context, opts $.DeleteOptions|raw$, listOpts $.ListOptions|raw$) error { - $if .namespaced$action := $.NewDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, c.ns, listOpts) - $else$action := $.NewRootDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, listOpts) + $if .namespaced$action := $.NewDeleteCollectionActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, opts, listOpts) + $else$action := $.NewRootDeleteCollectionActionWithOptions|raw$($.type|allLowercasePlural$Resource, opts, listOpts) $end$ _, err := c.Fake.Invokes(action, &$.type|raw$List{}) return err @@ -433,11 +420,12 @@ func (c *Fake$.type|publicPlural$) DeleteCollection(ctx context.Context, opts $. var createTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { + emptyResult := &$.resultType|raw${} obj, err := c.Fake. 
- $if .namespaced$Invokes($.NewCreateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) - $else$Invokes($.NewRootCreateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewCreateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$, opts), emptyResult) + $else$Invokes($.NewRootCreateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$, opts), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -446,11 +434,12 @@ func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.inputType|priva var createSubresourceTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { + emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$), &$.resultType|raw${}) - $else$Invokes($.NewRootCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewCreateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$, opts), emptyResult) + $else$Invokes($.NewRootCreateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", $.inputType|private$, opts), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -459,11 +448,12 @@ func (c *Fake$.type|publicPlural$) Create(ctx context.Context, $.type|private$Na var updateTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { + emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewUpdateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${}) - $else$Invokes($.NewRootUpdateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewUpdateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$, opts), emptyResult) + $else$Invokes($.NewRootUpdateActionWithOptions|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$, opts), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -472,11 +462,12 @@ func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.inputType|priva var updateSubresourceTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. 
Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { + emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", c.ns, $.inputType|private$), &$.inputType|raw${}) - $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", c.ns, $.inputType|private$, opts), &$.inputType|raw${}) + $else$Invokes($.NewRootUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$, opts), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -485,12 +476,13 @@ func (c *Fake$.type|publicPlural$) Update(ctx context.Context, $.type|private$Na var updateStatusTemplate = ` // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *Fake$.type|publicPlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (*$.type|raw$, error) { +func (c *Fake$.type|publicPlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (result *$.type|raw$, err error) { + emptyResult := &$.type|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", c.ns, $.type|private$), &$.type|raw${}) - $else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", $.type|private$), &$.type|raw${})$end$ + $if .namespaced$Invokes($.NewUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "status", c.ns, $.type|private$, opts), emptyResult) + $else$Invokes($.NewRootUpdateSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, "status", $.type|private$, opts), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.type|raw$), err } @@ -500,19 +492,20 @@ var watchTemplate = ` // Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. func (c *Fake$.type|publicPlural$) Watch(ctx context.Context, opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { return c.Fake. - $if .namespaced$InvokesWatch($.NewWatchAction|raw$($.type|allLowercasePlural$Resource, c.ns, opts)) - $else$InvokesWatch($.NewRootWatchAction|raw$($.type|allLowercasePlural$Resource, opts))$end$ + $if .namespaced$InvokesWatch($.NewWatchActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, opts)) + $else$InvokesWatch($.NewRootWatchActionWithOptions|raw$($.type|allLowercasePlural$Resource, opts))$end$ } ` var patchTemplate = ` // Patch applies the patch and returns the patched $.resultType|private$. func (c *Fake$.type|publicPlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { + emptyResult := &$.resultType|raw${} obj, err := c.Fake. 
- $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, subresources... ), &$.resultType|raw${}) - $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, pt, data, subresources...), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, opts, subresources... ), emptyResult) + $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, name, pt, data, opts, subresources...), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -528,15 +521,16 @@ func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.inputType|privat if err != nil { return nil, err } - name := $.inputType|private$.Name + name := $.inputType|private$.Name if name == nil { return nil, fmt.Errorf("$.inputType|private$.Name must be provided to Apply") } + emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, *name, $.ApplyPatchType|raw$, data), &$.resultType|raw${}) - $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, *name, $.ApplyPatchType|raw$, data), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions()), emptyResult) + $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions()), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -553,15 +547,16 @@ func (c *Fake$.type|publicPlural$) ApplyStatus(ctx context.Context, $.inputType| if err != nil { return nil, err } - name := $.inputType|private$.Name + name := $.inputType|private$.Name if name == nil { return nil, fmt.Errorf("$.inputType|private$.Name must be provided to Apply") } + emptyResult := &$.resultType|raw${} obj, err := c.Fake. - $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, *name, $.ApplyPatchType|raw$, data, "status"), &$.resultType|raw${}) - $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, *name, $.ApplyPatchType|raw$, data, "status"), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "status"), emptyResult) + $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, *name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "status"), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } @@ -578,11 +573,12 @@ func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.type|private$Nam if err != nil { return nil, err } + emptyResult := &$.resultType|raw${} obj, err := c.Fake. 
- $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, $.type|private$Name, $.ApplyPatchType|raw$, data, "status"), &$.resultType|raw${}) - $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, $.ApplyPatchType|raw$, data, "status"), &$.resultType|raw${})$end$ + $if .namespaced$Invokes($.NewPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, c.ns, $.type|private$Name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "$.inputType|private$"), emptyResult) + $else$Invokes($.NewRootPatchSubresourceActionWithOptions|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, $.ApplyPatchType|raw$, data, opts.ToPatchOptions(), "$.inputType|private$"), emptyResult)$end$ if obj == nil { - return nil, err + return emptyResult, err } return obj.(*$.resultType|raw$), err } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go index 6bf1ca37..0c043cee 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_clientset.go @@ -19,22 +19,21 @@ package generators import ( "fmt" "io" - "path/filepath" + "path" "strings" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) // genClientset generates a package for a clientset. type genClientset struct { - generator.DefaultGen + generator.GoGenerator groups []clientgentypes.GroupVersions groupGoNames map[clientgentypes.GroupVersion]string - clientsetPackage string - outputPackage string + clientsetPackage string // must be a Go import-path imports namer.ImportTracker clientsetGenerated bool } @@ -43,7 +42,7 @@ var _ generator.Generator = &genClientset{} func (g *genClientset) Namers(c *generator.Context) namer.NameSystems { return namer.NameSystems{ - "raw": namer.NewRawNamer(g.outputPackage, g.imports), + "raw": namer.NewRawNamer(g.clientsetPackage, g.imports), } } @@ -58,7 +57,7 @@ func (g *genClientset) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) 
for _, group := range g.groups { for _, version := range group.Versions { - typedClientPath := filepath.Join(g.clientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) + typedClientPath := path.Join(g.clientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty())) groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]) imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), typedClientPath)) } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go index f47c079e..5971cc5b 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_expansion.go @@ -22,13 +22,13 @@ import ( "path/filepath" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/types" ) // genExpansion produces a file for a group client, e.g. ExtensionsClient for the extension group. type genExpansion struct { - generator.DefaultGen + generator.GoGenerator groupPackagePath string // types in a group types []*types.Type diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go index 30284990..83b13b11 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_group.go @@ -18,19 +18,19 @@ package generators import ( "io" - "path/filepath" + "path" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/code-generator/cmd/client-gen/generators/util" - "k8s.io/code-generator/cmd/client-gen/path" ) // genGroup produces a file for a group client, e.g. ExtensionsClient for the extension group. type genGroup struct { - generator.DefaultGen + generator.GoGenerator outputPackage string group string version string @@ -40,7 +40,7 @@ type genGroup struct { types []*types.Type imports namer.ImportTracker inputPackage string - clientsetPackage string + clientsetPackage string // must be a Go import-path // If the genGroup has been called. This generator should only execute once. called bool } @@ -64,38 +64,32 @@ func (g *genGroup) Namers(c *generator.Context) namer.NameSystems { func (g *genGroup) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) - imports = append(imports, filepath.Join(g.clientsetPackage, "scheme")) + imports = append(imports, path.Join(g.clientsetPackage, "scheme")) return } func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - apiPath := func(group string) string { - if group == "core" { - return `"/api"` - } - return `"` + g.apiPath + `"` - } - - groupName := g.group - if g.group == "core" { - groupName = "" - } // allow user to define a group name that's different from the one parsed from the directory. 
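// Hedged sketch (not vendored code): the API-path selection performed just below. After the
// optional "+groupName" override is applied, a group that resolves to the empty name (the legacy
// core group) is served under "/api"; every other group uses the configured API path, typically
// "/apis". The surrounding quotes are kept because the value is injected into a code template.
package main

import "fmt"

func resolveAPIPath(groupName, configuredAPIPath string) string {
	apiPath := `"` + configuredAPIPath + `"`
	if groupName == "" {
		apiPath = `"/api"`
	}
	return apiPath
}

func main() {
	fmt.Println(resolveAPIPath("", "/apis"))                       // "/api"  (core/legacy group)
	fmt.Println(resolveAPIPath("samplegroup.example.io", "/apis")) // "/apis" (named group)
}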
- p := c.Universe.Package(path.Vendorless(g.inputPackage)) - if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + p := c.Universe.Package(g.inputPackage) + groupName := g.group + if override := gengo.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { groupName = override[0] } + apiPath := `"` + g.apiPath + `"` + if groupName == "" { + apiPath = `"/api"` + } + m := map[string]interface{}{ - "group": g.group, "version": g.version, "groupName": groupName, "GroupGoName": g.groupGoName, "Version": namer.IC(g.version), "types": g.types, - "apiPath": apiPath(g.group), + "apiPath": apiPath, "schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}), "runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}), "restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}), @@ -104,7 +98,7 @@ func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer "RESTHTTPClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "HTTPClientFor"}), "restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}), "restRESTClientForConfigAndClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientForConfigAndClient"}), - "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: path.Vendorless(g.inputPackage), Name: "SchemeGroupVersion"}), + "SchemeGroupVersion": c.Universe.Variable(types.Name{Package: g.inputPackage, Name: "SchemeGroupVersion"}), } sw.Do(groupInterfaceTemplate, m) sw.Do(groupClientTemplate, m) diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go index fe63dd19..7361c48f 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go @@ -19,23 +19,24 @@ package generators import ( "io" "path" - "path/filepath" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "golang.org/x/text/cases" + "golang.org/x/text/language" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/code-generator/cmd/client-gen/generators/util" ) // genClientForType produces a file for each top-level type. type genClientForType struct { - generator.DefaultGen - outputPackage string + generator.GoGenerator + outputPackage string // must be a Go import-path inputPackage string - clientsetPackage string - applyConfigurationPackage string + clientsetPackage string // must be a Go import-path + applyConfigurationPackage string // must be a Go import-path group string version string groupGoName string @@ -45,6 +46,8 @@ type genClientForType struct { var _ generator.Generator = &genClientForType{} +var titler = cases.Title(language.Und) + // Filter ignores all but one type because we're making a single file per type. 
func (g *genClientForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch @@ -81,7 +84,7 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i defaultVerbTemplates := buildDefaultVerbTemplates(generateApply) subresourceDefaultVerbTemplates := buildSubresourceDefaultVerbTemplates(generateApply) sw := generator.NewSnippetWriter(w, c, "$", "$") - pkg := filepath.Base(t.Name.Package) + pkg := path.Base(t.Name.Package) tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) if err != nil { return err @@ -120,9 +123,9 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i } var updatedVerbtemplate string if _, exists := subresourceDefaultVerbTemplates[e.VerbType]; e.IsSubresource() && exists { - updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(subresourceDefaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(") + updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(subresourceDefaultVerbTemplates[e.VerbType], titler.String(e.VerbType)+"(") } else { - updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(defaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(") + updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(defaultVerbTemplates[e.VerbType], titler.String(e.VerbType)+"(") } extendedMethod := extendedInterfaceMethod{ template: updatedVerbtemplate, @@ -145,30 +148,38 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i extendedMethods = append(extendedMethods, extendedMethod) } m := map[string]interface{}{ - "type": t, - "inputType": t, - "resultType": t, - "package": pkg, - "Package": namer.IC(pkg), - "namespaced": !tags.NonNamespaced, - "Group": namer.IC(g.group), - "subresource": false, - "subresourcePath": "", - "GroupGoName": g.groupGoName, - "Version": namer.IC(g.version), - "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), - "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), - "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), - "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), - "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), - "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), - "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), - "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), - "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), - "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), - "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), - "schemeParameterCodec": c.Universe.Variable(types.Name{Package: filepath.Join(g.clientsetPackage, "scheme"), Name: "ParameterCodec"}), - "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + "type": t, + "inputType": t, + "resultType": t, + "package": pkg, + "Package": namer.IC(pkg), + "namespaced": !tags.NonNamespaced, + "Group": 
namer.IC(g.group), + "subresource": false, + "subresourcePath": "", + "GroupGoName": g.groupGoName, + "Version": namer.IC(g.version), + "CreateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "CreateOptions"}), + "DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}), + "GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}), + "ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}), + "PatchOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "PatchOptions"}), + "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}), + "UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}), + "PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}), + "ApplyPatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "ApplyPatchType"}), + "watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}), + "RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}), + "schemeParameterCodec": c.Universe.Variable(types.Name{Package: path.Join(g.clientsetPackage, "scheme"), Name: "ParameterCodec"}), + "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}), + "resourceVersionMatchNotOlderThan": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ResourceVersionMatchNotOlderThan"}), + "CheckListFromCacheDataConsistencyIfRequested": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/consistencydetector", Name: "CheckListFromCacheDataConsistencyIfRequested"}), + "CheckWatchListFromCacheDataConsistencyIfRequested": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/consistencydetector", Name: "CheckWatchListFromCacheDataConsistencyIfRequested"}), + "PrepareWatchListOptionsFromListOptions": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/watchlist", Name: "PrepareWatchListOptionsFromListOptions"}), + "Client": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "Client"}), + "ClientWithList": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "ClientWithList"}), + "ClientWithApply": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "ClientWithApply"}), + "ClientWithListAndApply": c.Universe.Type(types.Name{Package: "k8s.io/client-go/gentype", Name: "ClientWithListAndApply"}), } if generateApply { @@ -203,53 +214,31 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i } sw.Do(interfaceTemplate4, m) + structNamespaced := namespaced if tags.NonNamespaced { - sw.Do(structNonNamespaced, m) - sw.Do(newStructNonNamespaced, m) - } else { - sw.Do(structNamespaced, m) - sw.Do(newStructNamespaced, m) + structNamespaced = nonNamespaced } if tags.NoVerbs { + sw.Do(structType[noList|noApply], m) + sw.Do(newStruct[structNamespaced|noList|noApply], m) + return sw.Error() } - if tags.HasVerb("get") { - sw.Do(getTemplate, m) - } + listableOrAppliable := noList | noApply + if tags.HasVerb("list") { - sw.Do(listTemplate, m) - } - if tags.HasVerb("watch") { - sw.Do(watchTemplate, m) + listableOrAppliable |= withList } - if 
tags.HasVerb("create") { - sw.Do(createTemplate, m) - } - if tags.HasVerb("update") { - sw.Do(updateTemplate, m) - } - if tags.HasVerb("updateStatus") { - sw.Do(updateStatusTemplate, m) - } - if tags.HasVerb("delete") { - sw.Do(deleteTemplate, m) - } - if tags.HasVerb("deleteCollection") { - sw.Do(deleteCollectionTemplate, m) - } - if tags.HasVerb("patch") { - sw.Do(patchTemplate, m) - } if tags.HasVerb("apply") && generateApply { - sw.Do(applyTemplate, m) - } - if tags.HasVerb("applyStatus") && generateApply { - sw.Do(applyStatusTemplate, m) + listableOrAppliable |= withApply } + sw.Do(structType[listableOrAppliable], m) + sw.Do(newStruct[structNamespaced|listableOrAppliable], m) + // generate expansion methods for _, e := range tags.Extensions { if e.HasVerb("apply") && !generateApply { @@ -295,6 +284,8 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i sw.Do(adjustTemplate(e.VerbName, e.VerbType, listSubresourceTemplate), m) } else { sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m) + sw.Do(adjustTemplate(e.VerbName, e.VerbType, privateListTemplate), m) + sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchListTemplate), m) } } @@ -345,7 +336,7 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i // TODO: Make the verbs in templates parametrized so the strings.Replace() is // not needed. func adjustTemplate(name, verbType, template string) string { - return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1) + return strings.ReplaceAll(template, " "+titler.String(verbType), " "+name) } func generateInterface(defaultVerbTemplates map[string]string, tags util.Tags) string { @@ -374,9 +365,10 @@ func buildSubresourceDefaultVerbTemplates(generateApply bool) map[string]string func buildDefaultVerbTemplates(generateApply bool) map[string]string { m := map[string]string{ - "create": `Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, - "update": `Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, - "updateStatus": `UpdateStatus(ctx context.Context, $.inputType|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (*$.type|raw$, error)`, + "create": `Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (*$.resultType|raw$, error)`, + "update": `Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`, + "updateStatus": `// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+UpdateStatus(ctx context.Context, $.inputType|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (*$.type|raw$, error)`, "delete": `Delete(ctx context.Context, name string, opts $.DeleteOptions|raw$) error`, "deleteCollection": `DeleteCollection(ctx context.Context, opts $.DeleteOptions|raw$, listOpts $.ListOptions|raw$) error`, "get": `Get(ctx context.Context, name string, opts $.GetOptions|raw$) (*$.resultType|raw$, error)`, @@ -386,7 +378,8 @@ func buildDefaultVerbTemplates(generateApply bool) map[string]string { } if generateApply { m["apply"] = `Apply(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)` - m["applyStatus"] = `ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)` + m["applyStatus"] = `// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)` } return m } @@ -418,51 +411,204 @@ var interfaceTemplate4 = ` } ` -// template for the struct that implements the type's interface -var structNamespaced = ` -// $.type|privatePlural$ implements $.type|public$Interface -type $.type|privatePlural$ struct { - client $.RESTClientInterface|raw$ - ns string -} -` - -// template for the struct that implements the type's interface -var structNonNamespaced = ` -// $.type|privatePlural$ implements $.type|public$Interface -type $.type|privatePlural$ struct { - client $.RESTClientInterface|raw$ -} -` +// struct and constructor variants +const ( + // The following values are bits in a bitmask. + // The values which can be set indicate namespace support, list support, and apply support; + // to make the declarations easier to read (like a truth table), corresponding zero-values + // are also declared. + namespaced = 0 + noList = 0 + noApply = 0 + nonNamespaced = 1 << iota + withList + withApply +) -var newStructNamespaced = ` -// new$.type|publicPlural$ returns a $.type|publicPlural$ -func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { - return &$.type|privatePlural${ - client: c.RESTClient(), - ns: namespace, +// The following string slices are similar to maps, but with combinable keys used as indices. +// Each entry defines whether it supports lists and/or apply, and if namespacedness matters, +// namespaces; each bit is then toggled: +// * noList, noApply: index 0; +// * withList, noApply: index 2; +// * noList, withApply: index 4; +// * withList, withApply: index 6. +// When namespacedness matters, the namespaced variants are the same as the above, and +// the non-namespaced variants are offset by 1. +// Go enforces index unicity in these kinds of declarations. 
+ +// struct declarations +// Namespacedness does not matter +var structType = []string{ + noList | noApply: ` + // $.type|privatePlural$ implements $.type|public$Interface + type $.type|privatePlural$ struct { + *$.Client|raw$[*$.resultType|raw$] + } + `, + withList | noApply: ` + // $.type|privatePlural$ implements $.type|public$Interface + type $.type|privatePlural$ struct { + *$.ClientWithList|raw$[*$.resultType|raw$, *$.resultType|raw$List] + } + `, + noList | withApply: ` + // $.type|privatePlural$ implements $.type|public$Interface + type $.type|privatePlural$ struct { + *$.ClientWithApply|raw$[*$.resultType|raw$, *$.inputApplyConfig|raw$] + } + `, + withList | withApply: ` + // $.type|privatePlural$ implements $.type|public$Interface + type $.type|privatePlural$ struct { + *$.ClientWithListAndApply|raw$[*$.resultType|raw$, *$.resultType|raw$List, *$.inputApplyConfig|raw$] + } + `, +} + +// Constructors for the struct, in all variants +// Namespacedness matters +var newStruct = []string{ + namespaced | noList | noApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClient[*$.resultType|raw$]( + "$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + namespace, + func() *$.resultType|raw$ { return &$.resultType|raw${} }), + } } -} -` - -var newStructNonNamespaced = ` -// new$.type|publicPlural$ returns a $.type|publicPlural$ -func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { - return &$.type|privatePlural${ - client: c.RESTClient(), + `, + namespaced | noList | withApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClientWithApply[*$.resultType|raw$, *$.inputApplyConfig|raw$]( + "$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + namespace, + func() *$.resultType|raw$ { return &$.resultType|raw${} }), + } + } + `, + namespaced | withList | noApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClientWithList[*$.resultType|raw$, *$.resultType|raw$List]( + "$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + namespace, + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + } + } + `, + namespaced | withList | withApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClientWithListAndApply[*$.resultType|raw$, *$.resultType|raw$List, *$.inputApplyConfig|raw$]( + "$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + namespace, + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + } } + `, + nonNamespaced | noList | noApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClient[*$.resultType|raw$]( + 
"$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + "", + func() *$.resultType|raw$ { return &$.resultType|raw${} }), + } + } + `, + nonNamespaced | noList | withApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClientWithApply[*$.resultType|raw$, *$.inputApplyConfig|raw$]( + "$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + "", + func() *$.resultType|raw$ { return &$.resultType|raw${} }), + } + } + `, + nonNamespaced | withList | noApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClientWithList[*$.resultType|raw$, *$.resultType|raw$List]( + "$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + "", + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + } + } + `, + nonNamespaced | withList | withApply: ` + // new$.type|publicPlural$ returns a $.type|publicPlural$ + func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ { + return &$.type|privatePlural${ + gentype.NewClientWithListAndApply[*$.resultType|raw$, *$.resultType|raw$List, *$.inputApplyConfig|raw$]( + "$.type|resource$", + c.RESTClient(), + $.schemeParameterCodec|raw$, + "", + func() *$.resultType|raw$ { return &$.resultType|raw${} }, + func() *$.resultType|raw$List { return &$.resultType|raw$List{} }), + } + } + `, } -` + var listTemplate = ` // List takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. -func (c *$.type|privatePlural$) List(ctx context.Context, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { +func (c *$.type|privatePlural$) List(ctx context.Context, opts $.ListOptions|raw$) (*$.resultType|raw$List, error) { + if watchListOptions, hasWatchListOptionsPrepared, watchListOptionsErr := $.PrepareWatchListOptionsFromListOptions|raw$(opts); watchListOptionsErr != nil { + klog.Warningf("Failed preparing watchlist options for $.type|resource$, falling back to the standard LIST semantics, err = %v", watchListOptionsErr ) + } else if hasWatchListOptionsPrepared { + result, err := c.watchList(ctx, watchListOptions) + if err == nil { + $.CheckWatchListFromCacheDataConsistencyIfRequested|raw$(ctx, "watchlist request for $.type|resource$", c.list, opts, result) + return result, nil + } + klog.Warningf("The watchlist request for $.type|resource$ ended with an error, falling back to the standard LIST semantics, err = %v", err) + } + result, err := c.list(ctx, opts) + if err == nil { + $.CheckListFromCacheDataConsistencyIfRequested|raw$(ctx, "list request for $.type|resource$", c.list, opts, result) + } + return result, err +} +` + +var privateListTemplate = ` +// list takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors. +func (c *$.type|privatePlural$) list(ctx context.Context, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil{ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } result = &$.resultType|raw$List{} - err = c.client.Get(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Get(). 
+ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). Timeout(timeout). @@ -480,8 +626,8 @@ func (c *$.type|privatePlural$) List(ctx context.Context, $.type|private$Name st timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } result = &$.resultType|raw$List{} - err = c.client.Get(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Get(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). @@ -497,8 +643,8 @@ var getTemplate = ` // Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any. func (c *$.type|privatePlural$) Get(ctx context.Context, name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} - err = c.client.Get(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Get(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(name). VersionedParams(&options, $.schemeParameterCodec|raw$). @@ -512,8 +658,8 @@ var getSubresourceTemplate = ` // Get takes name of the $.type|private$, and returns the corresponding $.resultType|raw$ object, and an error if there is any. func (c *$.type|privatePlural$) Get(ctx context.Context, $.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} - err = c.client.Get(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Get(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). @@ -527,8 +673,8 @@ func (c *$.type|privatePlural$) Get(ctx context.Context, $.type|private$Name str var deleteTemplate = ` // Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs. func (c *$.type|privatePlural$) Delete(ctx context.Context, name string, opts $.DeleteOptions|raw$) error { - return c.client.Delete(). - $if .namespaced$Namespace(c.ns).$end$ + return c.GetClient().Delete(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(name). Body(&opts). @@ -537,30 +683,12 @@ func (c *$.type|privatePlural$) Delete(ctx context.Context, name string, opts $. } ` -var deleteCollectionTemplate = ` -// DeleteCollection deletes a collection of objects. -func (c *$.type|privatePlural$) DeleteCollection(ctx context.Context, opts $.DeleteOptions|raw$, listOpts $.ListOptions|raw$) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil{ - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - $if .namespaced$Namespace(c.ns).$end$ - Resource("$.type|resource$"). - VersionedParams(&listOpts, $.schemeParameterCodec|raw$). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} -` - var createSubresourceTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *$.type|privatePlural$) Create(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} - err = c.client.Post(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Post(). 
+ $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). @@ -576,8 +704,8 @@ var createTemplate = ` // Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *$.type|privatePlural$) Create(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.CreateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} - err = c.client.Post(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Post(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). Body($.inputType|private$). @@ -591,8 +719,8 @@ var updateSubresourceTemplate = ` // Update takes the top resource name and the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *$.type|privatePlural$) Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} - err = c.client.Put(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Put(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). @@ -608,8 +736,8 @@ var updateTemplate = ` // Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any. func (c *$.type|privatePlural$) Update(ctx context.Context, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} - err = c.client.Put(). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Put(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.inputType|private$.Name). VersionedParams(&opts, $.schemeParameterCodec|raw$). @@ -620,24 +748,6 @@ func (c *$.type|privatePlural$) Update(ctx context.Context, $.inputType|private$ } ` -var updateStatusTemplate = ` -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *$.type|privatePlural$) UpdateStatus(ctx context.Context, $.type|private$ *$.type|raw$, opts $.UpdateOptions|raw$) (result *$.type|raw$, err error) { - result = &$.type|raw${} - err = c.client.Put(). - $if .namespaced$Namespace(c.ns).$end$ - Resource("$.type|resource$"). - Name($.type|private$.Name). - SubResource("status"). - VersionedParams(&opts, $.schemeParameterCodec|raw$). - Body($.type|private$). - Do(ctx). - Into(result) - return -} -` - var watchTemplate = ` // Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$. func (c *$.type|privatePlural$) Watch(ctx context.Context, opts $.ListOptions|raw$) ($.watchInterface|raw$, error) { @@ -646,12 +756,30 @@ func (c *$.type|privatePlural$) Watch(ctx context.Context, opts $.ListOptions|ra timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } opts.Watch = true - return c.client.Get(). + return c.GetClient().Get(). + $if .namespaced$Namespace(c.GetNamespace()).$end$ + VersionedParams(&opts, $.schemeParameterCodec|raw$). 
+ Timeout(timeout). + Watch(ctx) +} +` + +var watchListTemplate = ` +// watchList establishes a watch stream with the server and returns the list of $.resultType|publicPlural$ +func (c *$.type|privatePlural$) watchList(ctx context.Context, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil{ + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &$.resultType|raw$List{} + err = c.client.Get(). $if .namespaced$Namespace(c.ns).$end$ Resource("$.type|resource$"). VersionedParams(&opts, $.schemeParameterCodec|raw$). Timeout(timeout). - Watch(ctx) + WatchList(ctx). + Into(result) + return } ` @@ -659,8 +787,8 @@ var patchTemplate = ` // Patch applies the patch and returns the patched $.resultType|private$. func (c *$.type|privatePlural$) Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error) { result = &$.resultType|raw${} - err = c.client.Patch(pt). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Patch(pt). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(name). SubResource(subresources...). @@ -688,42 +816,10 @@ func (c *$.type|privatePlural$) Apply(ctx context.Context, $.inputType|private$ return nil, fmt.Errorf("$.inputType|private$.Name must be provided to Apply") } result = &$.resultType|raw${} - err = c.client.Patch($.ApplyPatchType|raw$). - $if .namespaced$Namespace(c.ns).$end$ - Resource("$.type|resource$"). - Name(*name). - VersionedParams(&patchOpts, $.schemeParameterCodec|raw$). - Body(data). - Do(ctx). - Into(result) - return -} -` - -var applyStatusTemplate = ` -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *$.type|privatePlural$) ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) { - if $.inputType|private$ == nil { - return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := $.jsonMarshal|raw$($.inputType|private$) - if err != nil { - return nil, err - } - - name := $.inputType|private$.Name - if name == nil { - return nil, fmt.Errorf("$.inputType|private$.Name must be provided to Apply") - } - - result = &$.resultType|raw${} - err = c.client.Patch($.ApplyPatchType|raw$). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Patch($.ApplyPatchType|raw$). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name(*name). - SubResource("status"). VersionedParams(&patchOpts, $.schemeParameterCodec|raw$). Body(data). Do(ctx). @@ -746,8 +842,8 @@ func (c *$.type|privatePlural$) Apply(ctx context.Context, $.type|private$Name s } result = &$.resultType|raw${} - err = c.client.Patch($.ApplyPatchType|raw$). - $if .namespaced$Namespace(c.ns).$end$ + err = c.GetClient().Patch($.ApplyPatchType|raw$). + $if .namespaced$Namespace(c.GetNamespace()).$end$ Resource("$.type|resource$"). Name($.type|private$Name). SubResource("$.subresourcePath$"). 
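Note: for orientation, the sketch below shows roughly what the namespaced | withList | withApply templates above expand to once client-gen is rerun. It is an illustrative example added alongside this diff, not part of the vendored change itself: core/v1 ConfigMap stands in for the generated resource, a bare rest.Interface is accepted where the real generated file takes the group client and calls c.RESTClient(), and it assumes a k8s.io/client-go version that ships the gentype package (v0.31.x, in line with the code-generator vendored here).

package example

import (
	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/gentype"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

// configMaps mirrors the struct the withList|withApply structType template emits:
// every standard verb is inherited from the embedded generic client.
type configMaps struct {
	*gentype.ClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applycorev1.ConfigMapApplyConfiguration]
}

// newConfigMaps follows the namespaced | withList | withApply newStruct template,
// taking a rest.Interface directly where the generated code passes c.RESTClient().
func newConfigMaps(restClient rest.Interface, namespace string) *configMaps {
	return &configMaps{
		gentype.NewClientWithListAndApply[*corev1.ConfigMap, *corev1.ConfigMapList, *applycorev1.ConfigMapApplyConfiguration](
			"configmaps",
			restClient,
			scheme.ParameterCodec,
			namespace,
			func() *corev1.ConfigMap { return &corev1.ConfigMap{} },
			func() *corev1.ConfigMapList { return &corev1.ConfigMapList{} }),
	}
}

The shape of the new templates is visible here: the standard verbs come from the embedded gentype client rather than from per-verb generated methods, which is why updateStatusTemplate, applyStatusTemplate and deleteCollectionTemplate are deleted in this file and the verb templates that remain are routed through c.GetClient()/c.GetNamespace() for the subresource and extension-method paths.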
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go index a87d7571..72290557 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/scheme/generator_for_scheme.go @@ -20,24 +20,24 @@ import ( "fmt" "io" "os" + "path" "path/filepath" "strings" - "k8s.io/code-generator/cmd/client-gen/path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) // GenScheme produces a package for a clientset with the scheme, codecs and parameter codecs. type GenScheme struct { - generator.DefaultGen - OutputPackage string + generator.GoGenerator + OutputPkg string // Must be a Go import-path + OutputPath string // optional Groups []clientgentypes.GroupVersions GroupGoNames map[clientgentypes.GroupVersion]string InputPackages map[clientgentypes.GroupVersion]string - OutputPath string ImportTracker namer.ImportTracker PrivateScheme bool CreateRegistry bool @@ -46,7 +46,7 @@ type GenScheme struct { func (g *GenScheme) Namers(c *generator.Context) namer.NameSystems { return namer.NameSystems{ - "raw": namer.NewRawNamer(g.OutputPackage, g.ImportTracker), + "raw": namer.NewRawNamer(g.OutputPkg, g.ImportTracker), } } @@ -66,14 +66,14 @@ func (g *GenScheme) Imports(c *generator.Context) (imports []string) { if g.CreateRegistry { // import the install package for internal clientsets instead of the type package with register.go if version.Version != "" { - packagePath = filepath.Dir(packagePath) + packagePath = path.Dir(packagePath) } - packagePath = filepath.Join(packagePath, "install") + packagePath = path.Join(packagePath, "install") - imports = append(imports, fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath))) + imports = append(imports, fmt.Sprintf("%s \"%s\"", groupAlias, packagePath)) break } else { - imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.Version.NonEmpty()), path.Vendorless(packagePath))) + imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.Version.NonEmpty()), packagePath)) } } } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go index e74de077..5218dfad 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" ) var supportedTags = []string{ @@ -192,7 +192,7 @@ func MustParseClientGenTags(lines []string) Tags { // tags are provided. 
func ParseClientGenTags(lines []string) (Tags, error) { ret := Tags{} - values := types.ExtractCommentTags("+", lines) + values := gengo.ExtractCommentTags("+", lines) var value []string value, ret.GenerateClient = values["genclient"] // Check the old format and error when used to avoid generating client when //+genclient=false diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/main.go b/vendor/k8s.io/code-generator/cmd/client-gen/main.go index 64a1274d..09866e93 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/main.go @@ -23,40 +23,44 @@ import ( "github.com/spf13/pflag" "k8s.io/klog/v2" - generatorargs "k8s.io/code-generator/cmd/client-gen/args" + "k8s.io/code-generator/cmd/client-gen/args" "k8s.io/code-generator/cmd/client-gen/generators" "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" ) func main() { klog.InitFlags(nil) - genericArgs, customArgs := generatorargs.NewDefaults() + args := args.New() - // Override defaults. - // TODO: move this out of client-gen - genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/clientset_generated/" - - genericArgs.AddFlags(pflag.CommandLine) - customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen + args.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() // add group version package as input dirs for gengo - for _, pkg := range customArgs.Groups { + inputPkgs := []string{} + for _, pkg := range args.Groups { for _, v := range pkg.Versions { - genericArgs.InputDirs = append(genericArgs.InputDirs, v.Package) + inputPkgs = append(inputPkgs, v.Package) } } - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { klog.Fatalf("Error: %v", err) } - if err := genericArgs.Execute( - generators.NameSystems(util.PluralExceptionListToMapOrDie(customArgs.PluralExceptions)), + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + + if err := gengo.Execute( + generators.NameSystems(util.PluralExceptionListToMapOrDie(args.PluralExceptions)), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + inputPkgs, ); err != nil { klog.Fatalf("Error: %v", err) } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go b/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go deleted file mode 100644 index 19b269bd..00000000 --- a/vendor/k8s.io/code-generator/cmd/client-gen/path/path.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package path - -import "strings" - -// Vendorless removes the longest match of "*/vendor/" from the front of p. 
-// It is useful if a package locates in vendor/, e.g., -// k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1, because gengo -// indexes the package with its import path, e.g., -// k8s.io/apimachinery/pkg/apis/meta/v1, -func Vendorless(p string) string { - if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { - return p[pos+len("/vendor/"):] - } - return p -} diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go index 59f2fd44..c84a7753 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/helpers.go @@ -22,7 +22,7 @@ import ( "sort" "strings" - "k8s.io/gengo/namer" + "k8s.io/gengo/v2/namer" ) // ToGroupVersion turns "group/version" string into a GroupVersion struct. It reports error @@ -116,6 +116,6 @@ func ToGroupInstallPackages(groups []GroupVersions, groupGoNames map[GroupVersio } // NormalizeGroupVersion calls normalizes the GroupVersion. -//func NormalizeGroupVersion(gv GroupVersion) GroupVersion { -// return GroupVersion{Group: gv.Group.NonEmpty(), Version: gv.Version, NonEmptyVersion: normalization.Version(gv.Version)} -//} +// func NormalizeGroupVersion(gv GroupVersion) GroupVersion { +// return GroupVersion{Group: gv.Group.NonEmpty(), Version: gv.Version, NonEmptyVersion: normalization.Version(gv.Version)} +// } diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go index a5cc37cf..df030e84 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/types/types.go @@ -42,7 +42,7 @@ func (g Group) String() string { } func (g Group) NonEmpty() string { - if g == "api" { + if g == "" { return "core" } return string(g) @@ -56,6 +56,8 @@ func (g Group) PackageName() string { return strings.ToLower(parts[0]) } +type Kind string + type PackageVersion struct { Version // The fully qualified package, e.g. k8s.io/kubernetes/pkg/apis/apps, where the types.go is found. @@ -67,14 +69,24 @@ type GroupVersion struct { Version Version } +type GroupVersionKind struct { + Group Group + Version Version + Kind Kind +} + func (gv GroupVersion) ToAPIVersion() string { - if len(gv.Group) > 0 && gv.Group.NonEmpty() != "core" { + if len(gv.Group) > 0 && gv.Group != "" { return gv.Group.String() + "/" + gv.Version.String() } else { return gv.Version.String() } } +func (gv GroupVersion) WithKind(kind Kind) GroupVersionKind { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} +} + type GroupVersions struct { // The name of the package for this group, e.g. apps. PackageName string diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go index c69280b1..cc61c48d 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/args/args.go @@ -20,7 +20,6 @@ import ( "fmt" "github.com/spf13/pflag" - "k8s.io/gengo/args" ) // DefaultBasePeerDirs are the peer-dirs nearly everybody will use, i.e. those coming from @@ -31,8 +30,10 @@ var DefaultBasePeerDirs = []string{ "k8s.io/apimachinery/pkg/runtime", } -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs struct { +type Args struct { + // The filename of the generated results. 
+ OutputFile string + // Base peer dirs which nearly everybody will use, i.e. outside of Kubernetes core. Peer dirs // are declared to make the generator pick up manually written conversion funcs from external // packages. @@ -52,39 +53,40 @@ type CustomArgs struct { // (within the allowed uses of unsafe) and is equivalent to a proposed Golang change to // allow structs that are identical to be assigned to each other. SkipUnsafe bool + + // GoHeaderFile is the path to a boilerplate header file for generated + // code. + GoHeaderFile string } -// NewDefaults returns default arguments for the generator. -func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - genericArgs := args.Default().WithoutDefaultFlagParsing() - customArgs := &CustomArgs{ +// New returns default arguments for the generator. +func New() *Args { + return &Args{ BasePeerDirs: DefaultBasePeerDirs, SkipUnsafe: false, } - genericArgs.CustomArgs = customArgs - genericArgs.OutputFileBaseName = "conversion_generated" - return genericArgs, customArgs } // AddFlags add the generator flags to the flag set. -func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { - pflag.CommandLine.StringSliceVar(&ca.BasePeerDirs, "base-peer-dirs", ca.BasePeerDirs, +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.OutputFile, "output-file", "generated.conversion.go", + "the name of the file to be generated") + fs.StringSliceVar(&args.BasePeerDirs, "base-peer-dirs", args.BasePeerDirs, "Comma-separated list of apimachinery import paths which are considered, after tag-specified peers, for conversions. Only change these if you have very good reasons.") - pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs, + fs.StringSliceVar(&args.ExtraPeerDirs, "extra-peer-dirs", args.ExtraPeerDirs, "Application specific comma-separated list of import paths which are considered, after tag-specified peers and base-peer-dirs, for conversions.") - pflag.CommandLine.StringSliceVar(&ca.ExtraDirs, "extra-dirs", ca.ExtraDirs, + fs.StringSliceVar(&args.ExtraDirs, "extra-dirs", args.ExtraDirs, "Application specific comma-separated list of import paths which are loaded and considered for callable conversions, but are not considered peers for conversion.") - pflag.CommandLine.BoolVar(&ca.SkipUnsafe, "skip-unsafe", ca.SkipUnsafe, + fs.BoolVar(&args.SkipUnsafe, "skip-unsafe", args.SkipUnsafe, "If true, will not generate code using unsafe pointer conversions; resulting code may be slower.") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") } // Validate checks the given arguments. 
-func Validate(genericArgs *args.GeneratorArgs) error { - _ = genericArgs.CustomArgs.(*CustomArgs) - - if len(genericArgs.OutputFileBaseName) == 0 { - return fmt.Errorf("output file base name cannot be empty") +func (args *Args) Validate() error { + if len(args.OutputFile) == 0 { + return fmt.Errorf("--output-file must be specified") } - return nil } diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go index 5b734797..f58130af 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -20,20 +20,17 @@ import ( "bytes" "fmt" "io" - "path/filepath" + "path" "reflect" "sort" "strings" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - + "k8s.io/code-generator/cmd/conversion-gen/args" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" - - conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" - genutil "k8s.io/code-generator/pkg/util" ) // These are the comment tags that carry parameters for conversion generation. @@ -52,24 +49,24 @@ const ( ) func extractTag(comments []string) []string { - return types.ExtractCommentTags("+", comments)[tagName] + return gengo.ExtractCommentTags("+", comments)[tagName] } func extractExplicitFromTag(comments []string) []string { - return types.ExtractCommentTags("+", comments)[explicitFromTagName] + return gengo.ExtractCommentTags("+", comments)[explicitFromTagName] } func extractExternalTypesTag(comments []string) []string { - return types.ExtractCommentTags("+", comments)[externalTypesTagName] + return gengo.ExtractCommentTags("+", comments)[externalTypesTagName] } func isCopyOnly(comments []string) bool { - values := types.ExtractCommentTags("+", comments)["k8s:conversion-fn"] + values := gengo.ExtractCommentTags("+", comments)["k8s:conversion-fn"] return len(values) == 1 && values[0] == "copy-only" } func isDrop(comments []string) bool { - values := types.ExtractCommentTags("+", comments)["k8s:conversion-fn"] + values := gengo.ExtractCommentTags("+", comments)["k8s:conversion-fn"] return len(values) == 1 && values[0] == "drop" } @@ -136,7 +133,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package klog.Warning("Skipping nil package passed to getManualConversionFunctions") return } - klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) + klog.V(3).Infof("Scanning for conversion functions in %v", pkg.Path) scopeName := types.Ref(conversionPackagePath, "Scope").Name errorName := types.Ref("", "error").Name @@ -152,27 +149,27 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package klog.Errorf("Function without signature: %#v", f) continue } - klog.V(8).Infof("Considering function %s", f.Name) + klog.V(6).Infof("Considering function %s", f.Name) signature := f.Underlying.Signature // Check whether the function is conversion function. 
// Note that all of them have signature: // func Convert_inType_To_outType(inType, outType, conversion.Scope) error if signature.Receiver != nil { - klog.V(8).Infof("%s has a receiver", f.Name) + klog.V(6).Infof("%s has a receiver", f.Name) continue } if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { - klog.V(8).Infof("%s has wrong parameters", f.Name) + klog.V(6).Infof("%s has wrong parameters", f.Name) continue } if len(signature.Results) != 1 || signature.Results[0].Name != errorName { - klog.V(8).Infof("%s has wrong results", f.Name) + klog.V(6).Infof("%s has wrong results", f.Name) continue } inType := signature.Parameters[0] outType := signature.Parameters[1] if inType.Kind != types.Pointer || outType.Kind != types.Pointer { - klog.V(8).Infof("%s has wrong parameter types", f.Name) + klog.V(6).Infof("%s has wrong parameter types", f.Name) continue } // Now check if the name satisfies the convention. @@ -180,7 +177,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package args := argsFromType(inType.Elem, outType.Elem) sw.Do("Convert_$.inType|public$_To_$.outType|public$", args) if f.Name.Name == buffer.String() { - klog.V(4).Infof("Found conversion function %s", f.Name) + klog.V(2).Infof("Found conversion function %s", f.Name) key := conversionPair{inType.Elem, outType.Elem} // We might scan the same package twice, and that's OK. if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path { @@ -192,20 +189,19 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package if strings.HasPrefix(f.Name.Name, "Convert_") { klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) } - klog.V(8).Infof("%s has wrong name", f.Name) + klog.V(3).Infof("%s has wrong name", f.Name) } buffer.Reset() } } -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - packages := generator.Packages{} - header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + targets := []generator.Target{} // Accumulate pre-existing conversion functions. // TODO: This is too ad-hoc. We need a better way. @@ -219,131 +215,122 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // have non-trivial conversion memoryEquivalentTypes := equalMemoryTypes{} - // We are generating conversions only for packages that are explicitly - // passed as InputDir. - processed := map[string]bool{} + // First load other "input" packages. We do this as a single call because + // it is MUCH faster. + filteredInputs := make([]string, 0, len(context.Inputs)) + otherPkgs := make([]string, 0, len(context.Inputs)) + pkgToPeers := map[string][]string{} + pkgToExternal := map[string]string{} for _, i := range context.Inputs { - // skip duplicates - if processed[i] { - continue - } - processed[i] = true + klog.V(3).Infof("pre-processing pkg %q", i) - klog.V(5).Infof("considering pkg %q", i) pkg := context.Universe[i] - // typesPkg is where the versioned types are defined. Sometimes it is - // different from pkg. 
For example, kubernetes core/v1 types are defined - // in vendor/k8s.io/api/core/v1, while pkg is at pkg/api/v1. - typesPkg := pkg - if pkg == nil { - // If the input had no Go files, for example. - continue - } - - // Add conversion and defaulting functions. - getManualConversionFunctions(context, pkg, manualConversions) // Only generate conversions for packages which explicitly request it // by specifying one or more "+k8s:conversion-gen=" // in their doc.go file. peerPkgs := extractTag(pkg.Comments) - if peerPkgs != nil { - klog.V(5).Infof(" tags: %q", peerPkgs) - if len(peerPkgs) == 1 && peerPkgs[0] == "false" { - // If a single +k8s:conversion-gen=false tag is defined, we still want - // the generator to fire for this package for explicit conversions, but - // we are clearing the peerPkgs to not generate any standard conversions. - peerPkgs = nil - } - } else { - klog.V(5).Infof(" no tag") + if peerPkgs == nil { + klog.V(3).Infof(" no tag") continue } - skipUnsafe := false - extraDirs := []string{} - if customArgs, ok := arguments.CustomArgs.(*conversionargs.CustomArgs); ok { - if len(peerPkgs) > 0 { - peerPkgs = append(peerPkgs, customArgs.BasePeerDirs...) - peerPkgs = append(peerPkgs, customArgs.ExtraPeerDirs...) - } - extraDirs = customArgs.ExtraDirs - skipUnsafe = customArgs.SkipUnsafe + klog.V(3).Infof(" tags: %q", peerPkgs) + if len(peerPkgs) == 1 && peerPkgs[0] == "false" { + // If a single +k8s:conversion-gen=false tag is defined, we still want + // the generator to fire for this package for explicit conversions, but + // we are clearing the peerPkgs to not generate any standard conversions. + peerPkgs = nil + } else { + // Save peers for each input + pkgToPeers[i] = peerPkgs } + otherPkgs = append(otherPkgs, peerPkgs...) + // Keep this one for further processing. + filteredInputs = append(filteredInputs, i) - // if the external types are not in the same package where the conversion functions to be generated + // if the external types are not in the same package where the + // conversion functions to be generated externalTypesValues := extractExternalTypesTag(pkg.Comments) if externalTypesValues != nil { if len(externalTypesValues) != 1 { klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) } externalTypes := externalTypesValues[0] - klog.V(5).Infof(" external types tags: %q", externalTypes) - var err error - typesPkg, err = context.AddDirectory(externalTypes) - if err != nil { - klog.Fatalf("cannot import package %s", externalTypes) - } - // update context.Order to the latest context.Universe - orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} - context.Order = orderer.OrderUniverse(context.Universe) + klog.V(3).Infof(" external types tags: %q", externalTypes) + otherPkgs = append(otherPkgs, externalTypes) + pkgToExternal[i] = externalTypes + } else { + pkgToExternal[i] = i } + } - // if the source path is within a /vendor/ directory (for example, - // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow - // generation to output to the proper relative path (under vendor). - // Otherwise, the generator will create the file in the wrong location - // in the output directory. - // TODO: build a more fundamental concept in gengo for dealing with modifications - // to vendored packages. - for i := range peerPkgs { - peerPkgs[i] = genutil.Vendorless(peerPkgs[i]) + // Make sure explicit peer-packages are added. + peers := args.BasePeerDirs + peers = append(peers, args.ExtraPeerDirs...) 
+ if expanded, err := context.FindPackages(peers...); err != nil { + klog.Fatalf("cannot find peer packages: %v", err) + } else { + otherPkgs = append(otherPkgs, expanded...) + // for each pkg, add these extras, too + for k := range pkgToPeers { + pkgToPeers[k] = append(pkgToPeers[k], expanded...) } - for i := range extraDirs { - extraDirs[i] = genutil.Vendorless(extraDirs[i]) + } + + if len(otherPkgs) > 0 { + if _, err := context.LoadPackages(otherPkgs...); err != nil { + klog.Fatalf("cannot load packages: %v", err) } + } + // update context.Order to the latest context.Universe + orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} + context.Order = orderer.OrderUniverse(context.Universe) - // Make sure our peer-packages are added and fully parsed. - for _, pp := range append(peerPkgs, extraDirs...) { - context.AddDir(pp) - p := context.Universe[pp] - if nil == p { - klog.Fatalf("failed to find pkg: %s", pp) - } - getManualConversionFunctions(context, p, manualConversions) + // Look for conversion functions in the peer-packages. + for _, pp := range otherPkgs { + p := context.Universe[pp] + if p == nil { + klog.Fatalf("failed to find pkg: %s", pp) } + getManualConversionFunctions(context, p, manualConversions) + } + + // We are generating conversions only for packages that are explicitly + // passed as InputDir. + for _, i := range filteredInputs { + klog.V(3).Infof("considering pkg %q", i) + pkg := context.Universe[i] + // typesPkg is where the versioned types are defined. Sometimes it is + // different from pkg. For example, kubernetes core/v1 types are defined + // in k8s.io/api/core/v1, while pkg is at pkg/api/v1. + typesPkg := pkg + + // Add conversion and defaulting functions. + getManualConversionFunctions(context, pkg, manualConversions) + + // Find the right input pkg, which might not be this one. + externalTypes := pkgToExternal[i] + typesPkg = context.Universe[externalTypes] unsafeEquality := TypesEqual(memoryEquivalentTypes) - if skipUnsafe { + if args.SkipUnsafe { unsafeEquality = noEquality{} } - path := pkg.Path - // if the source path is within a /vendor/ directory (for example, - // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow - // generation to output to the proper relative path (under vendor). - // Otherwise, the generator will create the file in the wrong location - // in the output directory. - // TODO: build a more fundamental concept in gengo for dealing with modifications - // to vendored packages. 
- if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { - expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) - if strings.Contains(expandedPath, "/vendor/") { - path = expandedPath - } - } - packages = append(packages, - &generator.DefaultPackage{ - PackageName: filepath.Base(pkg.Path), - PackagePath: path, - HeaderText: header, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{ - NewGenConversion(arguments.OutputFileBaseName, typesPkg.Path, pkg.Path, manualConversions, peerPkgs, unsafeEquality), - } - }, + targets = append(targets, + &generator.SimpleTarget{ + PkgName: path.Base(pkg.Path), + PkgPath: pkg.Path, + PkgDir: pkg.Dir, // output pkg is the same as the input + HeaderComment: boilerplate, FilterFunc: func(c *generator.Context, t *types.Type) bool { return t.Name.Package == typesPkg.Path }, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenConversion(args.OutputFile, typesPkg.Path, pkg.Path, manualConversions, pkgToPeers[pkg.Path], unsafeEquality), + } + }, }) } @@ -351,14 +338,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // from being a candidate for unsafe conversion for k, v := range manualConversions { if isCopyOnly(v.CommentLines) { - klog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) + klog.V(4).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) continue } // this type should be excluded from all equivalence, because the converter must be called. memoryEquivalentTypes.Skip(k.inType, k.outType) } - return packages + return targets } type equalMemoryTypes map[conversionPair]bool @@ -466,7 +453,7 @@ type TypesEqual interface { // genConversion produces a file with a autogenerated conversions. type genConversion struct { - generator.DefaultGen + generator.GoGenerator // the package that contains the types that conversion func are going to be // generated for typesPackage string @@ -482,10 +469,10 @@ type genConversion struct { useUnsafe TypesEqual } -func NewGenConversion(sanitizedName, typesPackage, outputPackage string, manualConversions conversionFuncMap, peerPkgs []string, useUnsafe TypesEqual) generator.Generator { +func NewGenConversion(outputFilename, typesPackage, outputPackage string, manualConversions conversionFuncMap, peerPkgs []string, useUnsafe TypesEqual) generator.Generator { return &genConversion{ - DefaultGen: generator.DefaultGen{ - OptionalName: sanitizedName, + GoGenerator: generator.GoGenerator{ + OutputFilename: outputFilename, }, typesPackage: typesPackage, outputPackage: outputPackage, @@ -538,7 +525,7 @@ func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type if tagvals[0] != "false" { klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) } - klog.V(5).Infof("type %v requests no conversion generation, skipping", t) + klog.V(2).Infof("type %v requests no conversion generation, skipping", t) return false } // TODO: Consider generating functions for other kinds too. @@ -553,7 +540,8 @@ func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type } func getExplicitFromTypes(t *types.Type) []types.Name { - comments := append(t.SecondClosestCommentLines, t.CommentLines...) + comments := t.SecondClosestCommentLines + comments = append(comments, t.CommentLines...) 
paths := extractExplicitFromTag(comments) result := []types.Name{} for _, path := range paths { @@ -638,7 +626,7 @@ func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, boo } func (g *genConversion) Init(c *generator.Context, w io.Writer) error { - klogV := klog.V(5) + klogV := klog.V(6) if klogV.Enabled() { if m, ok := g.useUnsafe.(equalMemoryTypes); ok { var result []string @@ -694,10 +682,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error { } // sort by name of the conversion function sort.Slice(pairs, func(i, j int) bool { - if g.manualConversions[pairs[i]].Name.Name < g.manualConversions[pairs[j]].Name.Name { - return true - } - return false + return g.manualConversions[pairs[i]].Name.Name < g.manualConversions[pairs[j]].Name.Name }) for _, pair := range pairs { args := argsFromType(pair.inType, pair.outType).With("Scope", types.Ref(conversionPackagePath, "Scope")).With("fn", g.manualConversions[pair]) @@ -731,7 +716,7 @@ func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.W } switch { case inType.Name.Package == "net/url" && inType.Name.Name == "Values": - g.generateFromUrlValues(inType, t, sw) + g.generateFromURLValues(inType, t, sw) default: klog.Errorf("Not supported input type: %#v", inType.Name) } @@ -771,7 +756,7 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene // at any nesting level. This makes the autogenerator easy to understand, and // the compiler shouldn't care. func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) { - klog.V(5).Infof("generating %v -> %v", inType, outType) + klog.V(4).Infof("generating %v -> %v", inType, outType) var f func(*types.Type, *types.Type, *generator.SnippetWriter) switch inType.Kind { @@ -948,7 +933,7 @@ func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.Snip sw.Do("}\n", nil) continue } - klog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) + klog.V(2).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) } // If we can't auto-convert, punt before we emit any code. 
@@ -1087,7 +1072,7 @@ func (g *genConversion) doUnknown(inType, outType *types.Type, sw *generator.Sni sw.Do("// FIXME: Type $.|raw$ is unsupported.\n", inType) } -func (g *genConversion) generateFromUrlValues(inType, outType *types.Type, sw *generator.SnippetWriter) { +func (g *genConversion) generateFromURLValues(inType, outType *types.Type, sw *generator.SnippetWriter) { args := generator.Args{ "inType": inType, "outType": outType, diff --git a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go index 5a461d27..cd52a9b9 100644 --- a/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -102,36 +102,34 @@ import ( generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" "k8s.io/code-generator/cmd/conversion-gen/generators" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" ) func main() { klog.InitFlags(nil) - genericArgs, customArgs := generatorargs.NewDefaults() + args := generatorargs.New() - genericArgs.AddFlags(pflag.CommandLine) - customArgs.AddFlags(pflag.CommandLine) + args.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - // k8s.io/apimachinery/pkg/runtime contains a number of manual conversions, - // that we need to generate conversions. - // Packages being dependencies of explicitly requested packages are only - // partially scanned - only types explicitly used are being traversed. - // Not used functions or types are omitted. - // Adding this explicitly to InputDirs ensures that the package is fully - // scanned and all functions are parsed and processed. - genericArgs.InputDirs = append(genericArgs.InputDirs, "k8s.io/apimachinery/pkg/runtime") - - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { klog.Fatalf("Error: %v", err) } + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + // Run it. - if err := genericArgs.Execute( + if err := gengo.Execute( generators.NameSystems(), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) } diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go index 78971301..f5207a98 100644 --- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/args/args.go @@ -20,35 +20,33 @@ import ( "fmt" "github.com/spf13/pflag" - "k8s.io/gengo/args" - "k8s.io/gengo/examples/deepcopy-gen/generators" ) -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs generators.CustomArgs +type Args struct { + OutputFile string + BoundingDirs []string // Only deal with types rooted under these dirs. + GoHeaderFile string +} -// NewDefaults returns default arguments for the generator. -func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - genericArgs := args.Default().WithoutDefaultFlagParsing() - customArgs := &CustomArgs{} - genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there - genericArgs.OutputFileBaseName = "deepcopy_generated" - return genericArgs, customArgs +// New returns default arguments for the generator. 
+func New() *Args { + return &Args{} } // AddFlags add the generator flags to the flag set. -func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { - pflag.CommandLine.StringSliceVar(&ca.BoundingDirs, "bounding-dirs", ca.BoundingDirs, +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.OutputFile, "output-file", "generated.deepcopy.go", + "the name of the file to be generated") + fs.StringSliceVar(&args.BoundingDirs, "bounding-dirs", args.BoundingDirs, "Comma-separated list of import paths which bound the types for which deep-copies will be generated.") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") } // Validate checks the given arguments. -func Validate(genericArgs *args.GeneratorArgs) error { - _ = genericArgs.CustomArgs.(*generators.CustomArgs) - - if len(genericArgs.OutputFileBaseName) == 0 { - return fmt.Errorf("output file base name cannot be empty") +func (args *Args) Validate() error { + if len(args.OutputFile) == 0 { + return fmt.Errorf("--output-file must be specified") } - return nil } diff --git a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/generators/deepcopy.go similarity index 86% rename from vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go rename to vendor/k8s.io/code-generator/cmd/deepcopy-gen/generators/deepcopy.go index 03f9109c..c6047aac 100644 --- a/vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/generators/deepcopy.go @@ -19,25 +19,18 @@ package generators import ( "fmt" "io" - "path/filepath" + "path" "sort" "strings" - "k8s.io/gengo/args" - "k8s.io/gengo/examples/set-gen/sets" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - + "k8s.io/code-generator/cmd/deepcopy-gen/args" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) -// CustomArgs is used tby the go2idl framework to pass args specific to this -// generator. -type CustomArgs struct { - BoundingDirs []string // Only deal with types rooted under these dirs. -} - // This is the comment tag that carries parameters for deep-copy generation. const ( tagEnabledName = "k8s:deepcopy-gen" @@ -60,7 +53,7 @@ func extractEnabledTypeTag(t *types.Type) *enabledTagValue { } func extractEnabledTag(comments []string) *enabledTagValue { - tagVals := types.ExtractCommentTags("+", comments)[tagEnabledName] + tagVals := gengo.ExtractCommentTags("+", comments)[tagEnabledName] if tagVals == nil { // No match for the tag. return nil @@ -125,35 +118,28 @@ func DefaultNameSystem() string { return "public" } -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - inputs := sets.NewString(context.Inputs...) - packages := generator.Packages{} - header := append([]byte(fmt.Sprintf("//go:build !%s\n// +build !%s\n\n", arguments.GeneratedBuildTag, arguments.GeneratedBuildTag)), boilerplate...) 
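The args.go rewrite above drops gengo's CustomArgs indirection in favor of a plain struct bound to pflag. A toy, self-contained sketch of that pattern (the flag names and defaults mirror the diff, but the program itself is invented):

// Toy sketch of the new args pattern: a plain struct, flags registered on a
// pflag FlagSet, and Validate called before running the generator.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

type Args struct {
	OutputFile   string
	GoHeaderFile string
}

func (a *Args) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&a.OutputFile, "output-file", "generated.deepcopy.go",
		"the name of the file to be generated")
	fs.StringVar(&a.GoHeaderFile, "go-header-file", "",
		"the path to a file containing boilerplate header text")
}

func (a *Args) Validate() error {
	if len(a.OutputFile) == 0 {
		return fmt.Errorf("--output-file must be specified")
	}
	return nil
}

func main() {
	args := &Args{}
	args.AddFlags(pflag.CommandLine)
	pflag.Parse()
	if err := args.Validate(); err != nil {
		fmt.Fprintln(os.Stderr, "Error:", err)
		os.Exit(1)
	}
	fmt.Println("would write", args.OutputFile, "into each selected package")
}
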
- boundingDirs := []string{} - if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok { - if customArgs.BoundingDirs == nil { - customArgs.BoundingDirs = context.Inputs - } - for i := range customArgs.BoundingDirs { - // Strip any trailing slashes - they are not exactly "correct" but - // this is friendlier. - boundingDirs = append(boundingDirs, strings.TrimRight(customArgs.BoundingDirs[i], "/")) - } + if args.BoundingDirs == nil { + args.BoundingDirs = context.Inputs + } + for i := range args.BoundingDirs { + // Strip any trailing slashes - they are not exactly "correct" but + // this is friendlier. + boundingDirs = append(boundingDirs, strings.TrimRight(args.BoundingDirs[i], "/")) } - for i := range inputs { - klog.V(5).Infof("Considering pkg %q", i) + targets := []generator.Target{} + + for _, i := range context.Inputs { + klog.V(3).Infof("Considering pkg %q", i) + pkg := context.Universe[i] - if pkg == nil { - // If the input had no Go files, for example. - continue - } ptag := extractEnabledTag(pkg.Comments) ptagValue := "" @@ -164,68 +150,61 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat klog.Fatalf("Package %v: unsupported %s value: %q", i, tagEnabledName, ptagValue) } ptagRegister = ptag.register - klog.V(5).Infof(" tag.value: %q, tag.register: %t", ptagValue, ptagRegister) + klog.V(3).Infof(" tag.value: %q, tag.register: %t", ptagValue, ptagRegister) } else { - klog.V(5).Infof(" no tag") + klog.V(3).Infof(" no tag") } // If the pkg-scoped tag says to generate, we can skip scanning types. pkgNeedsGeneration := (ptagValue == tagValuePackage) if !pkgNeedsGeneration { // If the pkg-scoped tag did not exist, scan all types for one that - // explicitly wants generation. + // explicitly wants generation. Ensure all types that want generation + // can be copied. + var uncopyable []string for _, t := range pkg.Types { - klog.V(5).Infof(" considering type %q", t.Name.String()) + klog.V(3).Infof(" considering type %q", t.Name.String()) ttag := extractEnabledTypeTag(t) if ttag != nil && ttag.value == "true" { - klog.V(5).Infof(" tag=true") + klog.V(3).Infof(" tag=true") if !copyableType(t) { - klog.Fatalf("Type %v requests deepcopy generation but is not copyable", t) + uncopyable = append(uncopyable, fmt.Sprintf("%v", t)) + } else { + pkgNeedsGeneration = true } - pkgNeedsGeneration = true - break } } + if len(uncopyable) > 0 { + klog.Fatalf("Types requested deepcopy generation but are not copyable: %s", + strings.Join(uncopyable, ", ")) + } } if pkgNeedsGeneration { klog.V(3).Infof("Package %q needs generation", i) - path := pkg.Path - // if the source path is within a /vendor/ directory (for example, - // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow - // generation to output to the proper relative path (under vendor). - // Otherwise, the generator will create the file in the wrong location - // in the output directory. - // TODO: build a more fundamental concept in gengo for dealing with modifications - // to vendored packages. 
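For reference, a hypothetical API package laid out the way the scan above expects: the package-level tag opts every type in, and a single type can still opt back out. The module layout and names are invented; only the tags matter here.

// +k8s:deepcopy-gen=package

// Package v1 contains example types used purely for illustration.
package v1

// Widget gets DeepCopy and DeepCopyInto generated because of the package tag.
type Widget struct {
	Name  string
	Ports []int
}

// scratchBuffer opts back out even though the package opted in.
// +k8s:deepcopy-gen=false
type scratchBuffer struct {
	data []byte
}
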
- if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { - expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) - if strings.Contains(expandedPath, "/vendor/") { - path = expandedPath - } - } - packages = append(packages, - &generator.DefaultPackage{ - PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0], - PackagePath: path, - HeaderText: header, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{ - NewGenDeepCopy(arguments.OutputFileBaseName, pkg.Path, boundingDirs, (ptagValue == tagValuePackage), ptagRegister), - } - }, + targets = append(targets, + &generator.SimpleTarget{ + PkgName: strings.Split(path.Base(pkg.Path), ".")[0], + PkgPath: pkg.Path, + PkgDir: pkg.Dir, // output pkg is the same as the input + HeaderComment: boilerplate, FilterFunc: func(c *generator.Context, t *types.Type) bool { return t.Name.Package == pkg.Path }, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenDeepCopy(args.OutputFile, pkg.Path, boundingDirs, (ptagValue == tagValuePackage), ptagRegister), + } + }, }) } } - return packages + return targets } // genDeepCopy produces a file with autogenerated deep-copy functions. type genDeepCopy struct { - generator.DefaultGen + generator.GoGenerator targetPackage string boundingDirs []string allTypes bool @@ -234,10 +213,10 @@ type genDeepCopy struct { typesForInit []*types.Type } -func NewGenDeepCopy(sanitizedName, targetPackage string, boundingDirs []string, allTypes, registerTypes bool) generator.Generator { +func NewGenDeepCopy(outputFilename, targetPackage string, boundingDirs []string, allTypes, registerTypes bool) generator.Generator { return &genDeepCopy{ - DefaultGen: generator.DefaultGen{ - OptionalName: sanitizedName, + GoGenerator: generator.GoGenerator{ + OutputFilename: outputFilename, }, targetPackage: targetPackage, boundingDirs: boundingDirs, @@ -268,32 +247,24 @@ func (g *genDeepCopy) Filter(c *generator.Context, t *types.Type) bool { return false } if !copyableType(t) { - klog.V(2).Infof("Type %v is not copyable", t) + klog.V(3).Infof("Type %v is not copyable", t) return false } - klog.V(4).Infof("Type %v is copyable", t) + klog.V(3).Infof("Type %v is copyable", t) g.typesForInit = append(g.typesForInit, t) return true } -func (g *genDeepCopy) copyableAndInBounds(t *types.Type) bool { - if !copyableType(t) { - return false - } - // Only packages within the restricted range can be processed. - if !isRootedUnder(t.Name.Package, g.boundingDirs) { - return false - } - return true -} - // deepCopyMethod returns the signature of a DeepCopy() method, nil or an error // if the type does not match. This allows more efficient deep copy // implementations to be defined by the type's author. The correct signature // for a type T is: -// func (t T) DeepCopy() T +// +// func (t T) DeepCopy() T +// // or: -// func (t *T) DeepCopy() *T +// +// func (t *T) DeepCopy() *T func deepCopyMethod(t *types.Type) (*types.Signature, error) { f, found := t.Methods["DeepCopy"] if !found { @@ -340,9 +311,12 @@ func deepCopyMethodOrDie(t *types.Type) *types.Signature { // if the type is wrong. DeepCopyInto allows more efficient deep copy // implementations to be defined by the type's author. 
The correct signature // for a type T is: -// func (t T) DeepCopyInto(t *T) +// +// func (t T) DeepCopyInto(t *T) +// // or: -// func (t *T) DeepCopyInto(t *T) +// +// func (t *T) DeepCopyInto(t *T) func deepCopyIntoMethod(t *types.Type) (*types.Signature, error) { f, found := t.Methods["DeepCopyInto"] if !found { @@ -382,18 +356,6 @@ func deepCopyIntoMethodOrDie(t *types.Type) *types.Signature { return ret } -func isRootedUnder(pkg string, roots []string) bool { - // Add trailing / to avoid false matches, e.g. foo/bar vs foo/barn. This - // assumes that bounding dirs do not have trailing slashes. - pkg = pkg + "/" - for _, root := range roots { - if strings.HasPrefix(pkg, root+"/") { - return true - } - } - return false -} - func copyableType(t *types.Type) bool { // If the type opts out of copy-generation, stop. ttag := extractEnabledTypeTag(t) @@ -475,12 +437,12 @@ func (g *genDeepCopy) needsGeneration(t *types.Type) bool { } if g.allTypes && tv == "false" { // The whole package is being generated, but this type has opted out. - klog.V(5).Infof("Not generating for type %v because type opted out", t) + klog.V(2).Infof("Not generating for type %v because type opted out", t) return false } if !g.allTypes && tv != "true" { // The whole package is NOT being generated, and this type has NOT opted in. - klog.V(5).Infof("Not generating for type %v because type did not opt in", t) + klog.V(2).Infof("Not generating for type %v because type did not opt in", t) return false } return true @@ -489,7 +451,7 @@ func (g *genDeepCopy) needsGeneration(t *types.Type) bool { func extractInterfacesTag(t *types.Type) []string { var result []string comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) - values := types.ExtractCommentTags("+", comments)[interfacesTagName] + values := gengo.ExtractCommentTags("+", comments)[interfacesTagName] for _, v := range values { if len(v) == 0 { continue @@ -507,7 +469,7 @@ func extractInterfacesTag(t *types.Type) []string { func extractNonPointerInterfaces(t *types.Type) (bool, error) { comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...) 
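A hand-written pair matching the signatures documented above looks like the following sketch; when such methods already exist on a type, the generator calls them instead of emitting new ones. The type itself is invented.

// Frobber is a hypothetical type with author-provided deep-copy methods.
package v1

type Frobber struct {
	Name   string
	Labels map[string]string
}

// DeepCopyInto copies the receiver into out, deep-copying reference fields.
func (in *Frobber) DeepCopyInto(out *Frobber) {
	*out = *in
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

// DeepCopy returns a new, independent copy of the receiver.
func (in *Frobber) DeepCopy() *Frobber {
	if in == nil {
		return nil
	}
	out := new(Frobber)
	in.DeepCopyInto(out)
	return out
}
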
- values := types.ExtractCommentTags("+", comments)[interfacesNonPointerTagName] + values := gengo.ExtractCommentTags("+", comments)[interfacesNonPointerTagName] if len(values) == 0 { return false, nil } @@ -530,7 +492,8 @@ func (g *genDeepCopy) deepCopyableInterfacesInner(c *generator.Context, t *types var ts []*types.Type for _, intf := range intfs { t := types.ParseFullyQualifiedName(intf) - err := c.AddDir(t.Package) + klog.V(3).Infof("Loading package for interface %v", intf) + _, err := c.LoadPackages(t.Package) if err != nil { return nil, err } @@ -586,7 +549,7 @@ func (g *genDeepCopy) GenerateType(c *generator.Context, t *types.Type, w io.Wri if !g.needsGeneration(t) { return nil } - klog.V(5).Infof("Generating deepcopy function for type %v", t) + klog.V(2).Infof("Generating deepcopy functions for type %v", t) sw := generator.NewSnippetWriter(w, c, "$", "$") args := argsFromType(t) @@ -887,7 +850,7 @@ func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) { sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args) sw.Do("}\n", nil) default: - klog.Fatalf("Hit an unsupported type %v for %v, from %v", uft, ft, t) + klog.Fatalf("Hit an unsupported type '%v' for '%v', from %v.%v", uft, ft, t, m.Name) } } } diff --git a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go index 5622c1a1..aaa3155a 100644 --- a/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -16,14 +16,24 @@ limitations under the License. // deepcopy-gen is a tool for auto-generating DeepCopy functions. // -// Given a list of input directories, it will generate functions that -// efficiently perform a full deep-copy of each type. For any type that -// offers a `.DeepCopy()` method, it will simply call that. Otherwise it will -// use standard value assignment whenever possible. If that is not possible it -// will try to call its own generated copy function for the type, if the type is -// within the allowed root packages. Failing that, it will fall back on -// `conversion.Cloner.DeepCopy(val)` to make the copy. The resulting file will -// be stored in the same directory as the processed source package. +// Given a list of input directories, it will generate DeepCopy and +// DeepCopyInto methods that efficiently perform a full deep-copy of each type. +// If these methods already exist (are predefined by the developer), they are +// used instead of generating new ones. Generated code will use standard value +// assignment whenever possible. If that is not possible it will try to call +// its own generated copy function for the type. Failing that, it will fall +// back on `conversion.Cloner.DeepCopy(val)` to make the copy. The resulting +// file will be stored in the same directory as the processed source package. +// +// If interfaces are referenced in types, it is expected that corresponding +// DeepCopyInterfaceName methods exist, e.g. DeepCopyObject for runtime.Object. +// These can be predefined by the developer or generated through tags, see +// below. They must be added to the interfaces themselves manually, e.g. +// +// type Object interface { +// ... +// DeepCopyObject() Object +// } // // Generation is governed by comment tags in the source. Any package may // request DeepCopy generation by including a comment in the file-comments of @@ -32,48 +42,67 @@ limitations under the License. 
// // +k8s:deepcopy-gen=package // // DeepCopy functions can be generated for individual types, rather than the -// entire package by specifying a comment on the type definion of the form: +// entire package by specifying a comment on the type definition of the form: // // // +k8s:deepcopy-gen=true // // When generating for a whole package, individual types may opt out of -// DeepCopy generation by specifying a comment on the of the form: +// DeepCopy generation by specifying a comment on the type definition of the +// form: // // // +k8s:deepcopy-gen=false // -// Note that registration is a whole-package option, and is not available for -// individual types. +// Additional DeepCopyInterfaceName methods can be generated by specifying a +// comment on the type definition of the form: +// +// // +k8s:deepcopy-gen:interfaces=k8s.io/kubernetes/runtime.Object,k8s.io/kubernetes/runtime.List +// +// This leads to the generation of DeepCopyObject and DeepCopyList with the given +// interfaces as return types. We say that the tagged type implements deepcopy for the +// interfaces. +// +// The deepcopy funcs for interfaces using "+k8s:deepcopy-gen:interfaces" use the pointer +// of the type as receiver. For those special cases where the non-pointer object should +// implement the interface, this can be done with: +// +// // +k8s:deepcopy-gen:nonpointer-interfaces=true package main import ( "flag" "github.com/spf13/pflag" - "k8s.io/gengo/examples/deepcopy-gen/generators" + "k8s.io/code-generator/cmd/deepcopy-gen/args" + "k8s.io/code-generator/cmd/deepcopy-gen/generators" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" "k8s.io/klog/v2" - - generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" ) func main() { klog.InitFlags(nil) - genericArgs, customArgs := generatorargs.NewDefaults() + args := args.New() - genericArgs.AddFlags(pflag.CommandLine) - customArgs.AddFlags(pflag.CommandLine) + args.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { klog.Fatalf("Error: %v", err) } + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + // Run it. - if err := genericArgs.Execute( + if err := gengo.Execute( generators.NameSystems(), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) } diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go index 3c5a042c..52a9d1c6 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/args/args.go @@ -20,34 +20,33 @@ import ( "fmt" "github.com/spf13/pflag" - "k8s.io/gengo/args" - "k8s.io/gengo/examples/defaulter-gen/generators" ) -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs generators.CustomArgs +type Args struct { + OutputFile string + ExtraPeerDirs []string // Always consider these as last-ditch possibilities for conversions. + GoHeaderFile string +} -// NewDefaults returns default arguments for the generator. 
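The interface tags documented in the deepcopy-gen comment block above are easier to picture with a concrete, entirely hypothetical type: deepcopy-gen would add a DeepCopyShape method (pointer receiver by default) to Circle, returning the named interface. In real API packages the tag usually points at k8s.io/apimachinery/pkg/runtime.Object instead of a local interface.

// Hypothetical, self-contained illustration of the interface tags. The
// module path "example.com/shapes" and the Shape interface are invented.
package shapes

// Shape must list the generated method by hand, as noted in the docs above.
type Shape interface {
	Area() float64
	DeepCopyShape() Shape
}

// Circle asks deepcopy-gen for DeepCopy, DeepCopyInto and DeepCopyShape.
//
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=example.com/shapes.Shape
type Circle struct {
	Radius float64
}

// Area is the hand-written part of the Shape implementation; the generated
// DeepCopyShape completes it.
func (c *Circle) Area() float64 { return 3.14159 * c.Radius * c.Radius }
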
-func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - genericArgs := args.Default().WithoutDefaultFlagParsing() - customArgs := &CustomArgs{} - genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there - genericArgs.OutputFileBaseName = "zz_generated.defaults" - return genericArgs, customArgs +// New returns default arguments for the generator. +func New() *Args { + return &Args{} } // AddFlags add the generator flags to the flag set. -func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { - pflag.CommandLine.StringSliceVar(&ca.ExtraPeerDirs, "extra-peer-dirs", ca.ExtraPeerDirs, +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.OutputFile, "output-file", "generated.defaults.go", + "the name of the file to be generated") + fs.StringSliceVar(&args.ExtraPeerDirs, "extra-peer-dirs", args.ExtraPeerDirs, "Comma-separated list of import paths which are considered, after tag-specified peers, for conversions.") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") } // Validate checks the given arguments. -func Validate(genericArgs *args.GeneratorArgs) error { - _ = genericArgs.CustomArgs.(*generators.CustomArgs) - - if len(genericArgs.OutputFileBaseName) == 0 { - return fmt.Errorf("output file base name cannot be empty") +func (args *Args) Validate() error { + if len(args.OutputFile) == 0 { + return fmt.Errorf("--output-file must be specified") } return nil diff --git a/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/generators/defaulter.go similarity index 73% rename from vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go rename to vendor/k8s.io/code-generator/cmd/defaulter-gen/generators/defaulter.go index 220ec7e0..64b9ff29 100644 --- a/vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/generators/defaulter.go @@ -21,25 +21,20 @@ import ( "encoding/json" "fmt" "io" - "path/filepath" + "path" "reflect" + "regexp" "strconv" "strings" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - + "k8s.io/code-generator/cmd/defaulter-gen/args" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) -// CustomArgs is used tby the go2idl framework to pass args specific to this -// generator. -type CustomArgs struct { - ExtraPeerDirs []string // Always consider these as last-ditch possibilities for conversions. 
-} - var typeZeroValue = map[string]interface{}{ "uint": 0., "uint8": 0., @@ -70,19 +65,19 @@ const inputTagName = "k8s:defaulter-gen-input" const defaultTagName = "default" func extractDefaultTag(comments []string) []string { - return types.ExtractCommentTags("+", comments)[defaultTagName] + return gengo.ExtractCommentTags("+", comments)[defaultTagName] } func extractTag(comments []string) []string { - return types.ExtractCommentTags("+", comments)[tagName] + return gengo.ExtractCommentTags("+", comments)[tagName] } func extractInputTag(comments []string) []string { - return types.ExtractCommentTags("+", comments)[inputTagName] + return gengo.ExtractCommentTags("+", comments)[inputTagName] } func checkTag(comments []string, require ...string) bool { - values := types.ExtractCommentTags("+", comments)[tagName] + values := gengo.ExtractCommentTags("+", comments)[tagName] if len(require) == 0 { return len(values) == 1 && values[0] == "" } @@ -226,14 +221,13 @@ func getManualDefaultingFunctions(context *generator.Context, pkg *types.Package } } -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - packages := generator.Packages{} - header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) + targets := []generator.Target{} // Accumulate pre-existing default functions. // TODO: This is too ad-hoc. We need a better way. @@ -242,36 +236,77 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat buffer := &bytes.Buffer{} sw := generator.NewSnippetWriter(buffer, context, "$", "$") - // We are generating defaults only for packages that are explicitly - // passed as InputDir. + // First load other "input" packages. We do this as a single call because + // it is MUCH faster. + inputPkgs := make([]string, 0, len(context.Inputs)) + pkgToInput := map[string]string{} for _, i := range context.Inputs { klog.V(5).Infof("considering pkg %q", i) + pkg := context.Universe[i] - if pkg == nil { - // If the input had no Go files, for example. - continue + + // if the types are not in the same package where the defaulter functions to be generated + inputTags := extractInputTag(pkg.Comments) + if len(inputTags) > 1 { + panic(fmt.Sprintf("there may only be one input tag, got %#v", inputTags)) + } + if len(inputTags) == 1 { + inputPath := inputTags[0] + if strings.HasPrefix(inputPath, "./") || strings.HasPrefix(inputPath, "../") { + // this is a relative dir, which will not work under gomodules. + // join with the local package path, but warn + klog.Warningf("relative path %s=%s will not work under gomodule mode; use full package path (as used by 'import') instead", inputTagName, inputPath) + inputPath = path.Join(pkg.Path, inputTags[0]) + } + + klog.V(5).Infof(" input pkg %v", inputPath) + inputPkgs = append(inputPkgs, inputPath) + pkgToInput[i] = inputPath + } else { + pkgToInput[i] = i + } + } + + // Make sure explicit peer-packages are added. + var peerPkgs []string + for _, pkg := range args.ExtraPeerDirs { + // In case someone specifies a peer as a path into vendor, convert + // it to its "real" package path. 
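extractDefaultTag above is what picks up "+default" markers on fields. A hypothetical types file showing both a literal JSON default and the ref(...) symbol form (handled by parseSymbolReference further down in this file); the package and field names are invented, and the package is assumed to already be enabled for defaulter-gen:

// Hypothetical types file. Literal defaults are JSON values; ref(...) names
// a constant or variable (optionally package-qualified) to use instead.
package v1

// DefaultPort is referenced from a +default marker below.
const DefaultPort = 8080

type WidgetSpec struct {
	// Tier defaults to the JSON string "standard" when unset.
	// +default="standard"
	Tier *string `json:"tier,omitempty"`

	// Port defaults to the value of the DefaultPort constant via a symbol
	// reference rather than an inline literal.
	// +default=ref(DefaultPort)
	Port *int32 `json:"port,omitempty"`
}
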
+ if i := strings.Index(pkg, "/vendor/"); i != -1 { + pkg = pkg[i+len("/vendor/"):] + } + peerPkgs = append(peerPkgs, pkg) + } + if expanded, err := context.FindPackages(peerPkgs...); err != nil { + klog.Fatalf("cannot find peer packages: %v", err) + } else { + peerPkgs = expanded // now in fully canonical form + } + inputPkgs = append(inputPkgs, peerPkgs...) + + if len(inputPkgs) > 0 { + if _, err := context.LoadPackages(inputPkgs...); err != nil { + klog.Fatalf("cannot load packages: %v", err) } + } + // update context.Order to the latest context.Universe + orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} + context.Order = orderer.OrderUniverse(context.Universe) + + for _, i := range context.Inputs { + pkg := context.Universe[i] + // typesPkg is where the types that needs defaulter are defined. // Sometimes it is different from pkg. For example, kubernetes core/v1 - // types are defined in vendor/k8s.io/api/core/v1, while pkg is at - // pkg/api/v1. + // types are defined in k8s.io/api/core/v1, while the pkg which holds + // defaulter code is at k/k/pkg/api/v1. typesPkg := pkg // Add defaulting functions. getManualDefaultingFunctions(context, pkg, existingDefaulters) - var peerPkgs []string - if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok { - for _, pkg := range customArgs.ExtraPeerDirs { - if i := strings.Index(pkg, "/vendor/"); i != -1 { - pkg = pkg[i+len("/vendor/"):] - } - peerPkgs = append(peerPkgs, pkg) - } - } - // Make sure our peer-packages are added and fully parsed. + // Also look for defaulting functions in peer-packages. for _, pp := range peerPkgs { - context.AddDir(pp) getManualDefaultingFunctions(context, context.Universe[pp], existingDefaulters) } @@ -311,30 +346,9 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat return false } - // if the types are not in the same package where the defaulter functions to be generated - inputTags := extractInputTag(pkg.Comments) - if len(inputTags) > 1 { - panic(fmt.Sprintf("there could only be one input tag, got %#v", inputTags)) - } - if len(inputTags) == 1 { - var err error - - inputPath := inputTags[0] - if strings.HasPrefix(inputPath, "./") || strings.HasPrefix(inputPath, "../") { - // this is a relative dir, which will not work under gomodules. - // join with the local package path, but warn - klog.Warningf("relative path %s=%s will not work under gomodule mode; use full package path (as used by 'import') instead", inputTagName, inputPath) - inputPath = filepath.Join(pkg.Path, inputTags[0]) - } - - typesPkg, err = context.AddDirectory(inputPath) - if err != nil { - klog.Fatalf("cannot import package %s", inputPath) - } - // update context.Order to the latest context.Universe - orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} - context.Order = orderer.OrderUniverse(context.Universe) - } + // Find the right input pkg, which might not be this one. + inputPath := pkgToInput[i] + typesPkg = context.Universe[inputPath] newDefaulters := defaulterFuncMap{} for _, t := range typesPkg.Types { @@ -393,37 +407,25 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat klog.V(5).Infof("no defaulters in package %s", pkg.Name) } - path := pkg.Path - // if the source path is within a /vendor/ directory (for example, - // k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow - // generation to output to the proper relative path (under vendor). 
- // Otherwise, the generator will create the file in the wrong location - // in the output directory. - // TODO: build a more fundamental concept in gengo for dealing with modifications - // to vendored packages. - if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) { - expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase) - if strings.Contains(expandedPath, "/vendor/") { - path = expandedPath - } - } + targets = append(targets, + &generator.SimpleTarget{ + PkgName: path.Base(pkg.Path), + PkgPath: pkg.Path, + PkgDir: pkg.Dir, // output pkg is the same as the input + HeaderComment: boilerplate, - packages = append(packages, - &generator.DefaultPackage{ - PackageName: filepath.Base(pkg.Path), - PackagePath: path, - HeaderText: header, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{ - NewGenDefaulter(arguments.OutputFileBaseName, typesPkg.Path, pkg.Path, existingDefaulters, newDefaulters, peerPkgs), - } - }, FilterFunc: func(c *generator.Context, t *types.Type) bool { return t.Name.Package == typesPkg.Path }, + + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Generator{ + NewGenDefaulter(args.OutputFile, typesPkg.Path, pkg.Path, existingDefaulters, newDefaulters, peerPkgs), + } + }, }) } - return packages + return targets } // callTreeForType contains fields necessary to build a tree for types. @@ -441,6 +443,8 @@ func newCallTreeForType(existingDefaulters, newDefaulters defaulterFuncMap) *cal } } +// resolveType follows pointers and aliases of `t` until reaching the first +// non-pointer type in `t's` herarchy func resolveTypeAndDepth(t *types.Type) (*types.Type, int) { var prev *types.Type depth := 0 @@ -456,6 +460,42 @@ func resolveTypeAndDepth(t *types.Type) (*types.Type, int) { return t, depth } +// getPointerElementPath follows pointers and aliases to returns all +// pointer elements in the path from the given type, to its base value type. 
+// +// Example: +// +// type MyString string +// type MyStringPointer *MyString +// type MyStringPointerPointer *MyStringPointer +// type MyStringAlias MyStringPointer +// type MyStringAliasPointer *MyStringAlias +// type MyStringAliasDoublePointer **MyStringAlias +// +// t | defaultPointerElementPath(t) +// ---------------------------|---------------------------------------- +// MyString | [] +// MyStringPointer | [MyString] +// MyStringPointerPointer | [MyStringPointer, MyString] +// MyStringAlias | [MyStringPointer, MyString] +// MyStringAliasPointer | [MyStringAlias, MyStringPointer, MyString] +// MyStringAliasDoublePointer | [*MyStringAlias, MyStringAlias, MyStringPointer, MyString] +func getPointerElementPath(t *types.Type) []*types.Type { + var path []*types.Type + for t != nil { + switch t.Kind { + case types.Alias: + t = t.Underlying + case types.Pointer: + t = t.Elem + path = append(path, t) + default: + t = nil + } + } + return path +} + // getNestedDefault returns the first default value when resolving alias types func getNestedDefault(t *types.Type) string { var prev *types.Type @@ -474,63 +514,61 @@ func getNestedDefault(t *types.Type) string { return "" } -func mustEnforceDefault(t *types.Type, depth int, omitEmpty bool) (interface{}, error) { - if depth > 0 { - return nil, nil +var refRE = regexp.MustCompile(`^ref\((?P[^"]+)\)$`) +var refREIdentIndex = refRE.SubexpIndex("reference") + +// parseSymbolReference looks for strings that match one of the following: +// - ref(Ident) +// - ref(pkgpath.Ident) +// If the input string matches either of these, it will return the (optional) +// pkgpath, the Ident, and true. Otherwise it will return empty strings and +// false. +func parseSymbolReference(s, sourcePackage string) (types.Name, bool) { + matches := refRE.FindStringSubmatch(s) + if len(matches) < refREIdentIndex || matches[refREIdentIndex] == "" { + return types.Name{}, false } - switch t.Kind { - case types.Pointer, types.Map, types.Slice, types.Array, types.Interface: - return nil, nil - case types.Struct: - return map[string]interface{}{}, nil - case types.Builtin: - if !omitEmpty { - if zero, ok := typeZeroValue[t.String()]; ok { - return zero, nil - } else { - return nil, fmt.Errorf("please add type %v to typeZeroValue struct", t) - } - } - return nil, nil - default: - return nil, fmt.Errorf("not sure how to enforce default for %v", t.Kind) + + contents := matches[refREIdentIndex] + name := types.ParseFullyQualifiedName(contents) + if len(name.Package) == 0 { + name.Package = sourcePackage } + return name, true } -func populateDefaultValue(node *callNode, t *types.Type, tags string, commentLines []string) *callNode { +func populateDefaultValue(node *callNode, t *types.Type, tags string, commentLines []string, commentPackage string) *callNode { defaultMap := extractDefaultTag(commentLines) var defaultString string if len(defaultMap) == 1 { defaultString = defaultMap[0] + } else if len(defaultMap) > 1 { + klog.Fatalf("Found more than one default tag for %v", t.Kind) } - t, depth := resolveTypeAndDepth(t) + baseT, depth := resolveTypeAndDepth(t) if depth > 0 && defaultString == "" { defaultString = getNestedDefault(t) } - if len(defaultMap) > 1 { - klog.Fatalf("Found more than one default tag for %v", t.Kind) - } else if len(defaultMap) == 0 { + + if len(defaultString) == 0 { return node } + var symbolReference types.Name var defaultValue interface{} - if err := json.Unmarshal([]byte(defaultString), &defaultValue); err != nil { + if id, ok := 
parseSymbolReference(defaultString, commentPackage); ok { + symbolReference = id + defaultString = "" + } else if err := json.Unmarshal([]byte(defaultString), &defaultValue); err != nil { klog.Fatalf("Failed to unmarshal default: %v", err) } - omitEmpty := strings.Contains(reflect.StructTag(tags).Get("json"), "omitempty") - if enforced, err := mustEnforceDefault(t, depth, omitEmpty); err != nil { - klog.Fatal(err) - } else if enforced != nil { - if defaultValue != nil { - if reflect.DeepEqual(defaultValue, enforced) { - // If the default value annotation matches the default value for the type, - // do not generate any defaulting function - return node - } else { - enforcedJSON, _ := json.Marshal(enforced) - klog.Fatalf("Invalid default value (%#v) for non-pointer/non-omitempty. If specified, must be: %v", defaultValue, string(enforcedJSON)) - } + if defaultValue != nil { + zero := typeZeroValue[t.String()] + if reflect.DeepEqual(defaultValue, zero) { + // If the default value annotation matches the default value for the type, + // do not generate any defaulting function + return node } } @@ -540,10 +578,11 @@ func populateDefaultValue(node *callNode, t *types.Type, tags string, commentLin node.markerOnly = true } - node.defaultIsPrimitive = t.IsPrimitive() - node.defaultType = t.String() - node.defaultValue = defaultString - node.defaultDepth = depth + node.defaultIsPrimitive = baseT.IsPrimitive() + node.defaultType = baseT + node.defaultTopLevelType = t + node.defaultValue.InlineConstant = defaultString + node.defaultValue.SymbolReference = symbolReference return node } @@ -564,7 +603,7 @@ func (c *callTreeForType) build(t *types.Type, root bool) *callNode { parent.elem = true } - defaults, _ := c.existingDefaulters[t] + defaults := c.existingDefaulters[t] newDefaults, generated := c.newDefaulters[t] switch { case !root && generated && newDefaults.object != nil: @@ -619,7 +658,7 @@ func (c *callTreeForType) build(t *types.Type, root bool) *callNode { child.elem = true } parent.children = append(parent.children, *child) - } else if member := populateDefaultValue(nil, t.Elem, "", t.Elem.CommentLines); member != nil { + } else if member := populateDefaultValue(nil, t.Elem, "", t.Elem.CommentLines, t.Elem.Name.Package); member != nil { member.index = true parent.children = append(parent.children, *member) } @@ -627,7 +666,7 @@ func (c *callTreeForType) build(t *types.Type, root bool) *callNode { if child := c.build(t.Elem, false); child != nil { child.key = true parent.children = append(parent.children, *child) - } else if member := populateDefaultValue(nil, t.Elem, "", t.Elem.CommentLines); member != nil { + } else if member := populateDefaultValue(nil, t.Elem, "", t.Elem.CommentLines, t.Elem.Name.Package); member != nil { member.key = true parent.children = append(parent.children, *member) } @@ -644,9 +683,9 @@ func (c *callTreeForType) build(t *types.Type, root bool) *callNode { } if child := c.build(field.Type, false); child != nil { child.field = name - populateDefaultValue(child, field.Type, field.Tags, field.CommentLines) + populateDefaultValue(child, field.Type, field.Tags, field.CommentLines, field.Type.Name.Package) parent.children = append(parent.children, *child) - } else if member := populateDefaultValue(nil, field.Type, field.Tags, field.CommentLines); member != nil { + } else if member := populateDefaultValue(nil, field.Type, field.Tags, field.CommentLines, t.Name.Package); member != nil { member.field = name parent.children = append(parent.children, *member) } @@ -657,7 +696,7 
@@ func (c *callTreeForType) build(t *types.Type, root bool) *callNode { } } if len(parent.children) == 0 && len(parent.call) == 0 { - //klog.V(6).Infof("decided type %s needs no generation", t.Name) + // klog.V(6).Infof("decided type %s needs no generation", t.Name) return nil } return parent @@ -670,7 +709,7 @@ const ( // genDefaulter produces a file with a autogenerated conversions. type genDefaulter struct { - generator.DefaultGen + generator.GoGenerator typesPackage string outputPackage string peerPackages []string @@ -680,17 +719,17 @@ type genDefaulter struct { typesForInit []*types.Type } -func NewGenDefaulter(sanitizedName, typesPackage, outputPackage string, existingDefaulters, newDefaulters defaulterFuncMap, peerPkgs []string) generator.Generator { +func NewGenDefaulter(outputFilename, typesPackage, outputPackage string, existingDefaulters, newDefaulters defaulterFuncMap, peerPkgs []string) generator.Generator { return &genDefaulter{ - DefaultGen: generator.DefaultGen{ - OptionalName: sanitizedName, + GoGenerator: generator.GoGenerator{ + OutputFilename: outputFilename, }, typesPackage: typesPackage, outputPackage: outputPackage, peerPackages: peerPkgs, newDefaulters: newDefaulters, existingDefaulters: existingDefaulters, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(outputPackage), typesForInit: make([]*types.Type, 0), } } @@ -766,6 +805,15 @@ func (g *genDefaulter) GenerateType(c *generator.Context, t *types.Type, w io.Wr } i := 0 callTree.VisitInOrder(func(ancestors []*callNode, current *callNode) { + if ref := ¤t.defaultValue.SymbolReference; len(ref.Name) > 0 { + // Ensure package for symbol is imported in output generation + g.imports.AddSymbol(*ref) + + // Rewrite the fully qualified name using the local package name + // from the imports + ref.Package = g.imports.LocalNameOf(ref.Package) + } + if len(current.call) == 0 { return } @@ -795,26 +843,26 @@ func (g *genDefaulter) generateDefaulter(inType *types.Type, callTree *callNode, // how in Go code an access would be performed. For example, if a defaulting function exists on a container // lifecycle hook, to invoke that defaulter correctly would require this Go code: // -// for i := range pod.Spec.Containers { -// o := &pod.Spec.Containers[i] -// if o.LifecycleHook != nil { -// SetDefaults_LifecycleHook(o.LifecycleHook) -// } -// } +// for i := range pod.Spec.Containers { +// o := &pod.Spec.Containers[i] +// if o.LifecycleHook != nil { +// SetDefaults_LifecycleHook(o.LifecycleHook) +// } +// } // // That would be represented by a call tree like: // -// callNode -// field: "Spec" -// children: -// - field: "Containers" -// children: -// - index: true -// children: -// - field: "LifecycleHook" -// elem: true -// call: -// - SetDefaults_LifecycleHook +// callNode +// field: "Spec" +// children: +// - field: "Containers" +// children: +// - index: true +// children: +// - field: "LifecycleHook" +// elem: true +// call: +// - SetDefaults_LifecycleHook // // which we can traverse to build that Go struct (you must call the field Spec, then Containers, then range over // that field, then check whether the LifecycleHook field is nil, before calling SetDefaults_LifecycleHook on @@ -836,7 +884,7 @@ type callNode struct { // defaultValue is the defaultValue of a callNode struct // Only primitive types and pointer types are eligible to have a default value - defaultValue string + defaultValue defaultValue // defaultIsPrimitive is used to determine how to assign the default value. 
// Primitive types will be directly assigned while complex types will use JSON unmarshalling @@ -845,21 +893,41 @@ type callNode struct { // markerOnly is true if the callNode exists solely to fill in a default value markerOnly bool - // defaultDepth is used to determine pointer level of the default value - // For example 1 corresponds to setting a default value and taking its pointer while - // 2 corresponds to setting a default value and taking its pointer's pointer - // 0 implies that no pointers are used - // This is used in situations where a field is a pointer to a primitive value rather than a primitive value itself. + // defaultType is the transitive underlying/element type of the node. + // The provided default value literal or reference is expected to be + // convertible to this type. // - // type A { - // +default="foo" - // Field *string - // } - defaultDepth int - - // defaultType is the type of the default value. + // e.g: + // node type = *string -> defaultType = string + // node type = StringPointerAlias -> defaultType = string // Only populated if defaultIsPrimitive is true - defaultType string + defaultType *types.Type + + // defaultTopLevelType is the final type the value should resolve to + // This is in constrast with default type, which resolves aliases and pointers. + defaultTopLevelType *types.Type +} + +type defaultValue struct { + // The value was written directly in the marker comment and + // has been parsed as JSON + InlineConstant string + // The name of the symbol relative to the parsed package path + // i.e. k8s.io/pkg.apis.v1.Foo if from another package or simply `Foo` + // if within the same package. + SymbolReference types.Name +} + +func (d defaultValue) IsEmpty() bool { + resolved := d.Resolved() + return resolved == "" +} + +func (d defaultValue) Resolved() string { + if len(d.InlineConstant) > 0 { + return d.InlineConstant + } + return d.SymbolReference.String() } // CallNodeVisitorFunc is a function for visiting a call tree. 
ancestors is the list of all parents @@ -918,7 +986,7 @@ func (n *callNode) writeCalls(varName string, isVarPointer bool, sw *generator.S func getTypeZeroValue(t string) (interface{}, error) { defaultZero, ok := typeZeroValue[t] if !ok { - return nil, fmt.Errorf("Cannot find zero value for type %v in typeZeroValue", t) + return nil, fmt.Errorf("cannot find zero value for type %v in typeZeroValue", t) } // To generate the code for empty string, they must be quoted @@ -929,15 +997,14 @@ func getTypeZeroValue(t string) (interface{}, error) { } func (n *callNode) writeDefaulter(varName string, index string, isVarPointer bool, sw *generator.SnippetWriter) { - if n.defaultValue == "" { + if n.defaultValue.IsEmpty() { return } args := generator.Args{ - "defaultValue": n.defaultValue, + "defaultValue": n.defaultValue.Resolved(), "varName": varName, "index": index, - "varDepth": n.defaultDepth, - "varType": n.defaultType, + "varTopType": n.defaultTopLevelType, } variablePlaceholder := "" @@ -961,25 +1028,72 @@ func (n *callNode) writeDefaulter(varName string, index string, isVarPointer boo if n.defaultIsPrimitive { // If the default value is a primitive when the assigned type is a pointer // keep using the address-of operator on the primitive value until the types match - if n.defaultDepth > 0 { - sw.Do(fmt.Sprintf("if %s == nil {\n", variablePlaceholder), args) - sw.Do("var ptrVar$.varDepth$ $.varType$ = $.defaultValue$\n", args) - // We iterate until a depth of 1 instead of 0 because the following line - // `if $.varName$ == &ptrVar1` accounts for 1 level already - for i := n.defaultDepth; i > 1; i-- { - sw.Do("ptrVar$.ptri$ := &ptrVar$.i$\n", generator.Args{"i": fmt.Sprintf("%d", i), "ptri": fmt.Sprintf("%d", (i - 1))}) + if pointerPath := getPointerElementPath(n.defaultTopLevelType); len(pointerPath) > 0 { + // If the destination is a pointer, the last element in + // defaultDepth is the element type of the bottommost pointer: + // the base type of our default value. 
+ destElemType := pointerPath[len(pointerPath)-1] + pointerArgs := args.WithArgs(generator.Args{ + "varDepth": len(pointerPath), + "baseElemType": destElemType, + }) + + sw.Do(fmt.Sprintf("if %s == nil {\n", variablePlaceholder), pointerArgs) + if len(n.defaultValue.InlineConstant) > 0 { + // If default value is a literal then it can be assigned via var stmt + sw.Do("var ptrVar$.varDepth$ $.baseElemType|raw$ = $.defaultValue$\n", pointerArgs) + } else { + // If default value is not a literal then it may need to be casted + // to the base type of the destination pointer + sw.Do("ptrVar$.varDepth$ := $.baseElemType|raw$($.defaultValue$)\n", pointerArgs) + } + + for i := len(pointerPath); i >= 1; i-- { + dest := fmt.Sprintf("ptrVar%d", i-1) + assignment := ":=" + if i == 1 { + // Last assignment is into the storage destination + dest = variablePlaceholder + assignment = "=" + } + + sourceType := "*" + destElemType.String() + if i == len(pointerPath) { + // Initial value is not a pointer + sourceType = destElemType.String() + } + destElemType = pointerPath[i-1] + + // Cannot include `dest` into args since its value may be + // `variablePlaceholder` which is a template, not a value + elementArgs := pointerArgs.WithArgs(generator.Args{ + "assignment": assignment, + "source": fmt.Sprintf("ptrVar%d", i), + "destElemType": destElemType, + }) + + // Skip cast if type is exact match + if destElemType.String() == sourceType { + sw.Do(fmt.Sprintf("%v $.assignment$ &$.source$\n", dest), elementArgs) + } else { + sw.Do(fmt.Sprintf("%v $.assignment$ (*$.destElemType|raw$)(&$.source$)\n", dest), elementArgs) + } } - sw.Do(fmt.Sprintf("%s = &ptrVar1", variablePlaceholder), args) } else { // For primitive types, nil checks cannot be used and the zero value must be determined - defaultZero, err := getTypeZeroValue(n.defaultType) + defaultZero, err := getTypeZeroValue(n.defaultType.String()) if err != nil { klog.Error(err) } args["defaultZero"] = defaultZero sw.Do(fmt.Sprintf("if %s == $.defaultZero$ {\n", variablePlaceholder), args) - sw.Do(fmt.Sprintf("%s = $.defaultValue$", variablePlaceholder), args) + + if len(n.defaultValue.InlineConstant) > 0 { + sw.Do(fmt.Sprintf("%s = $.defaultValue$", variablePlaceholder), args) + } else { + sw.Do(fmt.Sprintf("%s = $.varTopType|raw$($.defaultValue$)", variablePlaceholder), args) + } } } else { sw.Do(fmt.Sprintf("if %s == nil {\n", variablePlaceholder), args) @@ -1046,7 +1160,7 @@ func (n *callNode) WriteMethod(varName string, depth int, ancestors []*callNode, } sw.Do("}\n", nil) case n.key: - if n.defaultValue != "" { + if !n.defaultValue.IsEmpty() { // Map keys are typed and cannot share the same index variable as arrays and other maps index = index + "_" + ancestors[len(ancestors)-1].field vars["index"] = index @@ -1087,13 +1201,13 @@ func (path callPath) String() string { } case p.index: if len(parts) > 0 { - parts[last] = parts[last] + "[i]" + parts[last] += "[i]" } else { parts = append(parts, "[i]") } case p.key: if len(parts) > 0 { - parts[last] = parts[last] + "[key]" + parts[last] += "[key]" } else { parts = append(parts, "[key]") } diff --git a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go index 23d5d327..d57ca066 100644 --- a/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -45,31 +45,37 @@ import ( "flag" "github.com/spf13/pflag" - "k8s.io/gengo/examples/defaulter-gen/generators" + 
"k8s.io/code-generator/cmd/defaulter-gen/args" + "k8s.io/code-generator/cmd/defaulter-gen/generators" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" "k8s.io/klog/v2" - - generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args" ) func main() { klog.InitFlags(nil) - genericArgs, customArgs := generatorargs.NewDefaults() + args := args.New() - genericArgs.AddFlags(pflag.CommandLine) - customArgs.AddFlags(pflag.CommandLine) + args.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { klog.Fatalf("Error: %v", err) } + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + // Run it. - if err := genericArgs.Execute( + if err := gengo.Execute( generators.NameSystems(), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) } diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go index c472b15d..ca21c76c 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go @@ -22,7 +22,6 @@ import ( "bytes" "fmt" "log" - "os" "os/exec" "path/filepath" "sort" @@ -30,19 +29,18 @@ import ( flag "github.com/spf13/pflag" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/parser" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/parser" + "k8s.io/gengo/v2/types" ) type Generator struct { - Common args.GeneratorArgs + GoHeaderFile string APIMachineryPackages string Packages string - OutputBase string - VendorOutputBase string + OutputDir string ProtoImport []string Conditional string Clean bool @@ -53,20 +51,9 @@ type Generator struct { } func New() *Generator { - sourceTree := args.DefaultSourceTree() - common := args.GeneratorArgs{ - OutputBase: sourceTree, - } - defaultProtoImport := filepath.Join(sourceTree, "k8s.io", "kubernetes", "vendor", "github.com", "gogo", "protobuf", "protobuf") - cwd, err := os.Getwd() - if err != nil { - log.Fatalf("Cannot get current directory.") - } + defaultSourceTree := "." return &Generator{ - Common: common, - OutputBase: sourceTree, - VendorOutputBase: filepath.Join(cwd, "vendor"), - ProtoImport: []string{defaultProtoImport}, + OutputDir: defaultSourceTree, APIMachineryPackages: strings.Join([]string{ `+k8s.io/apimachinery/pkg/util/intstr`, `+k8s.io/apimachinery/pkg/api/resource`, @@ -82,13 +69,11 @@ func New() *Generator { } func (g *Generator) BindFlags(flag *flag.FlagSet) { - flag.StringVarP(&g.Common.GoHeaderFilePath, "go-header-file", "h", g.Common.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.") - flag.BoolVar(&g.Common.VerifyOnly, "verify-only", g.Common.VerifyOnly, "If true, only verify existing output, do not write anything.") + flag.StringVarP(&g.GoHeaderFile, "go-header-file", "h", "", "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.") flag.StringVarP(&g.Packages, "packages", "p", g.Packages, "comma-separated list of directories to get input types from. 
Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.") flag.StringVar(&g.APIMachineryPackages, "apimachinery-packages", g.APIMachineryPackages, "comma-separated list of directories to get apimachinery input types from which are needed by any API. Directories prefixed with '-' are not generated, directories prefixed with '+' only create types with explicit IDL instructions.") - flag.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/") - flag.StringVar(&g.VendorOutputBase, "vendor-output-base", g.VendorOutputBase, "The vendor/ directory to look for packages in; defaults to $PWD/vendor/.") - flag.StringSliceVar(&g.ProtoImport, "proto-import", g.ProtoImport, "The search path for the core protobuf .protos, required; defaults $GOPATH/src/k8s.io/kubernetes/vendor/github.com/gogo/protobuf/protobuf.") + flag.StringVar(&g.OutputDir, "output-dir", g.OutputDir, "The base directory under which to generate results.") + flag.StringSliceVar(&g.ProtoImport, "proto-import", g.ProtoImport, "A search path for imported protobufs (may be repeated).") flag.StringVar(&g.Conditional, "conditional", g.Conditional, "An optional Golang build tag condition to add to the generated Go code") flag.BoolVar(&g.Clean, "clean", g.Clean, "If true, remove all generated files for the specified Packages.") flag.BoolVar(&g.OnlyIDL, "only-idl", g.OnlyIDL, "If true, only generate the IDL for each package.") @@ -97,58 +82,43 @@ func (g *Generator) BindFlags(flag *flag.FlagSet) { flag.StringVar(&g.DropEmbeddedFields, "drop-embedded-fields", g.DropEmbeddedFields, "Comma-delimited list of embedded Go types to omit from generated protobufs") } +// This roughly models gengo/v2.Execute. func Run(g *Generator) { - if g.Common.VerifyOnly { - g.OnlyIDL = true - g.Clean = false - } - - b := parser.New() - b.AddBuildTags("proto") - - omitTypes := map[types.Name]struct{}{} - for _, t := range strings.Split(g.DropEmbeddedFields, ",") { - name := types.Name{} - if i := strings.LastIndex(t, "."); i != -1 { - name.Package, name.Name = t[:i], t[i+1:] - } else { - name.Name = t - } - if len(name.Name) == 0 { - log.Fatalf("--drop-embedded-types requires names in the form of [GOPACKAGE.]TYPENAME: %v", t) - } - omitTypes[name] = struct{}{} - } - - boilerplate, err := g.Common.LoadGoBoilerplate() - if err != nil { - log.Fatalf("Failed loading boilerplate (consider using the go-header-file flag): %v", err) - } + // Roughly models gengo/v2.newBuilder. - protobufNames := NewProtobufNamer() - outputPackages := generator.Packages{} - nonOutputPackages := map[string]struct{}{} + p := parser.NewWithOptions(parser.Options{BuildTags: []string{"proto"}}) - var packages []string + var allInputs []string if len(g.APIMachineryPackages) != 0 { - packages = append(packages, strings.Split(g.APIMachineryPackages, ",")...) + allInputs = append(allInputs, strings.Split(g.APIMachineryPackages, ",")...) } if len(g.Packages) != 0 { - packages = append(packages, strings.Split(g.Packages, ",")...) + allInputs = append(allInputs, strings.Split(g.Packages, ",")...) } - if len(packages) == 0 { + if len(allInputs) == 0 { log.Fatalf("Both apimachinery-packages and packages are empty. At least one package must be specified.") } - for _, d := range packages { - generateAllTypes, outputPackage := true, true + // Build up a list of packages to load from all the inputs. Track the + // special modifiers for each. NOTE: This does not support pkg/... syntax. 
+ type modifier struct { + allTypes bool + output bool + name string + } + inputModifiers := map[string]modifier{} + packages := make([]string, 0, len(allInputs)) + + for _, d := range allInputs { + modifier := modifier{allTypes: true, output: true} + switch { case strings.HasPrefix(d, "+"): d = d[1:] - generateAllTypes = false + modifier.allTypes = false case strings.HasPrefix(d, "-"): d = d[1:] - outputPackage = false + modifier.output = false } name := protoSafePackage(d) parts := strings.SplitN(d, "=", 2) @@ -156,41 +126,21 @@ func Run(g *Generator) { d = parts[0] name = parts[1] } - p := newProtobufPackage(d, name, generateAllTypes, omitTypes) - header := append([]byte{}, boilerplate...) - header = append(header, p.HeaderText...) - p.HeaderText = header - protobufNames.Add(p) - if outputPackage { - outputPackages = append(outputPackages, p) - } else { - nonOutputPackages[name] = struct{}{} - } - } + modifier.name = name - if !g.Common.VerifyOnly { - for _, p := range outputPackages { - if err := p.(*protobufPackage).Clean(g.OutputBase); err != nil { - log.Fatalf("Unable to clean package %s: %v", p.Name(), err) - } - } - } - - if g.Clean { - return + packages = append(packages, d) + inputModifiers[d] = modifier } - for _, p := range protobufNames.List() { - if err := b.AddDir(p.Path()); err != nil { - log.Fatalf("Unable to add directory %q: %v", p.Path(), err) - } + // Load all the packages at once. + if err := p.LoadPackages(packages...); err != nil { + log.Fatalf("Unable to load packages: %v", err) } c, err := generator.NewContext( - b, + p, namer.NameSystems{ "public": namer.NewPublicNamer(3), - "proto": protobufNames, }, "public", ) @@ -198,9 +148,63 @@ func Run(g *Generator) { log.Fatalf("Failed making a context: %v", err) } - c.Verify = g.Common.VerifyOnly c.FileTypes["protoidl"] = NewProtoFile() + // Roughly models gengo/v2.Execute calling the + // tool-provided Targets() callback. + + boilerplate, err := gengo.GoBoilerplate(g.GoHeaderFile, "", "") + if err != nil { + log.Fatalf("Failed loading boilerplate (consider using the go-header-file flag): %v", err) + } + + omitTypes := map[types.Name]struct{}{} + for _, t := range strings.Split(g.DropEmbeddedFields, ",") { + name := types.Name{} + if i := strings.LastIndex(t, "."); i != -1 { + name.Package, name.Name = t[:i], t[i+1:] + } else { + name.Name = t + } + if len(name.Name) == 0 { + log.Fatalf("--drop-embedded-types requires names in the form of [GOPACKAGE.]TYPENAME: %v", t) + } + omitTypes[name] = struct{}{} + } + + protobufNames := NewProtobufNamer() + outputPackages := []generator.Target{} + nonOutputPackages := map[string]struct{}{} + + for _, input := range c.Inputs { + mod, found := inputModifiers[input] + if !found { + log.Fatalf("BUG: can't find input modifiers for %q", input) + } + pkg := c.Universe[input] + protopkg := newProtobufPackage(pkg.Path, pkg.Dir, mod.name, mod.allTypes, omitTypes) + header := append([]byte{}, boilerplate...) + header = append(header, protopkg.HeaderComment...) 
+ protopkg.HeaderComment = header + protobufNames.Add(protopkg) + if mod.output { + outputPackages = append(outputPackages, protopkg) + } else { + nonOutputPackages[mod.name] = struct{}{} + } + } + c.Namers["proto"] = protobufNames + + for _, p := range outputPackages { + if err := p.(*protobufPackage).Clean(); err != nil { + log.Fatalf("Unable to clean package %s: %v", p.Name(), err) + } + } + + if g.Clean { + return + } + // order package by imports, importees first deps := deps(c, protobufNames.packages) order, err := importOrder(deps) @@ -213,28 +217,20 @@ func Run(g *Generator) { } sort.Sort(positionOrder{topologicalPos, protobufNames.packages}) - var vendoredOutputPackages, localOutputPackages generator.Packages + var localOutputPackages []generator.Target for _, p := range protobufNames.packages { if _, ok := nonOutputPackages[p.Name()]; ok { // if we're not outputting the package, don't include it in either package list continue } - p.Vendored = strings.Contains(c.Universe[p.PackagePath].SourcePath, "/vendor/") - if p.Vendored { - vendoredOutputPackages = append(vendoredOutputPackages, p) - } else { - localOutputPackages = append(localOutputPackages, p) - } + localOutputPackages = append(localOutputPackages, p) } if err := protobufNames.AssignTypesToPackages(c); err != nil { log.Fatalf("Failed to identify Common types: %v", err) } - if err := c.ExecutePackages(g.VendorOutputBase, vendoredOutputPackages); err != nil { - log.Fatalf("Failed executing vendor generator: %v", err) - } - if err := c.ExecutePackages(g.OutputBase, localOutputPackages); err != nil { + if err := c.ExecuteTargets(localOutputPackages); err != nil { log.Fatalf("Failed executing local generator: %v", err) } @@ -246,13 +242,24 @@ func Run(g *Generator) { log.Fatalf("Unable to find 'protoc': %v", err) } - searchArgs := []string{"-I", ".", "-I", g.OutputBase} + searchArgs := []string{"-I", ".", "-I", g.OutputDir} if len(g.ProtoImport) != 0 { for _, s := range g.ProtoImport { searchArgs = append(searchArgs, "-I", s) } } - args := append(searchArgs, fmt.Sprintf("--gogo_out=%s", g.OutputBase)) + // Despite docs saying that `--gogo_out=paths=source_relative:.` will + // output the .pb.go file to the same directory as the .proto file, it + // doesn't. Given example.com/foo/bar.proto (found in one of the -I paths + // above), the output becomes + // $output_base/example.com/foo/example.com/foo/bar.pb.go - basically + // useless. Users should set the output-dir to a single dir under which + // all the packages in question live (e.g. staging/src in kubernetes). + // Alternately, we could generate into a temp path and then move the + // resulting file back to the input dir, but that seems brittle in other + // ways. + args := searchArgs + args = append(args, fmt.Sprintf("--gogo_out=%s", g.OutputDir)) buf := &bytes.Buffer{} if len(g.Conditional) > 0 { @@ -263,22 +270,16 @@ func Run(g *Generator) { for _, outputPackage := range outputPackages { p := outputPackage.(*protobufPackage) - path := filepath.Join(g.OutputBase, p.ImportPath()) - outputPath := filepath.Join(g.OutputBase, p.OutputPath()) - if p.Vendored { - path = filepath.Join(g.VendorOutputBase, p.ImportPath()) - outputPath = filepath.Join(g.VendorOutputBase, p.OutputPath()) - } + path := filepath.Join(g.OutputDir, p.ImportPath()) + outputPath := filepath.Join(g.OutputDir, p.OutputPath()) // generate the gogoprotobuf protoc cmd := exec.Command("protoc", append(args, path)...) 
out, err := cmd.CombinedOutput() - if len(out) > 0 { - log.Print(string(out)) - } if err != nil { log.Println(strings.Join(cmd.Args, " ")) - log.Fatalf("Unable to generate protoc on %s: %v", p.PackageName, err) + log.Println(string(out)) + log.Fatalf("Unable to run protoc on %s: %v", p.Name(), err) } if g.SkipGeneratedRewrite { @@ -299,7 +300,7 @@ func Run(g *Generator) { } if err != nil { log.Println(strings.Join(cmd.Args, " ")) - log.Fatalf("Unable to rewrite imports for %s: %v", p.PackageName, err) + log.Fatalf("Unable to rewrite imports for %s: %v", p.Name(), err) } // format and simplify the generated file @@ -310,7 +311,7 @@ func Run(g *Generator) { } if err != nil { log.Println(strings.Join(cmd.Args, " ")) - log.Fatalf("Unable to apply gofmt for %s: %v", p.PackageName, err) + log.Fatalf("Unable to apply gofmt for %s: %v", p.Name(), err) } } @@ -324,10 +325,7 @@ func Run(g *Generator) { p := outputPackage.(*protobufPackage) p.OmitGogo = true } - if err := c.ExecutePackages(g.VendorOutputBase, vendoredOutputPackages); err != nil { - log.Fatalf("Failed executing vendor generator: %v", err) - } - if err := c.ExecutePackages(g.OutputBase, localOutputPackages); err != nil { + if err := c.ExecuteTargets(localOutputPackages); err != nil { log.Fatalf("Failed executing local generator: %v", err) } } @@ -339,10 +337,7 @@ func Run(g *Generator) { continue } - pattern := filepath.Join(g.OutputBase, p.PackagePath, "*.go") - if p.Vendored { - pattern = filepath.Join(g.VendorOutputBase, p.PackagePath, "*.go") - } + pattern := filepath.Join(g.OutputDir, p.Path(), "*.go") files, err := filepath.Glob(pattern) if err != nil { log.Fatalf("Can't glob pattern %q: %v", pattern, err) @@ -362,13 +357,13 @@ func Run(g *Generator) { func deps(c *generator.Context, pkgs []*protobufPackage) map[string][]string { ret := map[string][]string{} for _, p := range pkgs { - pkg, ok := c.Universe[p.PackagePath] + pkg, ok := c.Universe[p.Path()] if !ok { - log.Fatalf("Unrecognized package: %s", p.PackagePath) + log.Fatalf("Unrecognized package: %s", p.Path()) } for _, d := range pkg.Imports { - ret[p.PackagePath] = append(ret[p.PackagePath], d.Path) + ret[p.Path()] = append(ret[p.Path()], d.Path) } } return ret @@ -397,9 +392,9 @@ func importOrder(deps map[string][]string) ([]string, error) { if len(remainingNodes) > 0 { return nil, fmt.Errorf("cycle: remaining nodes: %#v, remaining edges: %#v", remainingNodes, graph) } - for _, n := range sorted { - fmt.Println("topological order", n) - } + // for _, n := range sorted { + // fmt.Println("topological order", n) + // } return sorted, nil } @@ -453,11 +448,9 @@ func (o positionOrder) Len() int { } func (o positionOrder) Less(i, j int) bool { - return o.pos[o.elements[i].PackagePath] < o.pos[o.elements[j].PackagePath] + return o.pos[o.elements[i].Path()] < o.pos[o.elements[j].Path()] } func (o positionOrder) Swap(i, j int) { - x := o.elements[i] - o.elements[i] = o.elements[j] - o.elements[j] = x + o.elements[i], o.elements[j] = o.elements[j], o.elements[i] } diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go index c480a8a6..0fc653dd 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -25,16 +25,18 @@ import ( "strconv" "strings" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" - - 
"k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" ) // genProtoIDL produces a .proto IDL. type genProtoIDL struct { - generator.DefaultGen + // This base type is close enough to what we need, if we redefine some + // methods. + generator.GoGenerator localPackage types.Name localGoPackage types.Name imports namer.ImportTracker @@ -63,8 +65,11 @@ func (g *genProtoIDL) PackageVars(c *generator.Context) []string { fmt.Sprintf("option go_package = %q;", g.localGoPackage.Package), } } -func (g *genProtoIDL) Filename() string { return g.OptionalName + ".proto" } + +func (g *genProtoIDL) Filename() string { return g.OutputFilename + ".proto" } + func (g *genProtoIDL) FileType() string { return "protoidl" } + func (g *genProtoIDL) Namers(c *generator.Context) namer.NameSystems { return namer.NameSystems{ // The local namer returns the correct protobuf name for a proto type @@ -75,7 +80,7 @@ func (g *genProtoIDL) Namers(c *generator.Context) namer.NameSystems { // Filter ignores types that are identified as not exportable. func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool { - tagVals := types.ExtractCommentTags("+", t.CommentLines)["protobuf"] + tagVals := gengo.ExtractCommentTags("+", t.CommentLines)["protobuf"] if tagVals != nil { if tagVals[0] == "false" { // Type specified "false". @@ -224,9 +229,8 @@ func (p protobufLocator) GoTypeForName(name types.Name) *types.Type { // ProtoTypeFor locates a Protobuf type for the provided Go type (if possible). func (p protobufLocator) ProtoTypeFor(t *types.Type) (*types.Type, error) { - switch { // we've already converted the type, or it's a map - case t.Kind == types.Protobuf || t.Kind == types.Map: + if t.Kind == types.Protobuf || t.Kind == types.Map { p.tracker.AddType(t) return t, nil } @@ -304,7 +308,7 @@ func (b bodyGen) doStruct(sw *generator.SnippetWriter) error { var alias *types.Type var fields []protoField options := []string{} - allOptions := types.ExtractCommentTags("+", b.t.CommentLines) + allOptions := gengo.ExtractCommentTags("+", b.t.CommentLines) for k, v := range allOptions { switch { case strings.HasPrefix(k, "protobuf.options."): @@ -554,11 +558,11 @@ func protobufTagToField(tag string, field *protoField, m types.Member, t *types. // protobuf:"bytes,3,opt,name=Id,customtype=github.com/gogo/protobuf/test.Uuid" parts := strings.Split(tag, ",") if len(parts) < 3 { - return fmt.Errorf("member %q of %q malformed 'protobuf' tag, not enough segments\n", m.Name, t.Name) + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, not enough segments", m.Name, t.Name) } protoTag, err := strconv.Atoi(parts[1]) if err != nil { - return fmt.Errorf("member %q of %q malformed 'protobuf' tag, field ID is %q which is not an integer: %v\n", m.Name, t.Name, parts[1], err) + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, field ID is %q which is not an integer: %w", m.Name, t.Name, parts[1], err) } field.Tag = protoTag @@ -579,7 +583,7 @@ func protobufTagToField(tag string, field *protoField, m types.Member, t *types. name = types.Name{ Name: parts[0][last+1:], Package: prefix, - Path: strings.Replace(prefix, ".", "/", -1), + Path: strings.ReplaceAll(prefix, ".", "/"), } } else { name = types.Name{ @@ -598,7 +602,7 @@ func protobufTagToField(tag string, field *protoField, m types.Member, t *types. 
for i, extra := range parts[3:] { parts := strings.SplitN(extra, "=", 2) if len(parts) != 2 { - return fmt.Errorf("member %q of %q malformed 'protobuf' tag, tag %d should be key=value, got %q\n", m.Name, t.Name, i+4, extra) + return fmt.Errorf("member %q of %q malformed 'protobuf' tag, tag %d should be key=value, got %q", m.Name, t.Name, i+4, extra) } switch parts[0] { case "name": diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go index 08a991b1..0031c9bd 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/import_tracker.go @@ -17,8 +17,8 @@ limitations under the License. package protobuf import ( - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) type ImportTracker struct { diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go index e3b21c67..2ad0a953 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/namer.go @@ -21,9 +21,9 @@ import ( "reflect" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) type localNamer struct { @@ -37,11 +37,17 @@ func (n localNamer) Name(t *types.Type) string { if len(n.localPackage.Package) != 0 && n.localPackage.Package == t.Name.Package { return t.Name.Name } + // For non-local and non-fundamental types, use an absolute reference + // see https://protobuf.com/docs/language-spec#type-references + if strings.Contains(t.Name.Package, ".") { + return fmt.Sprintf(".%s", t.Name) + } return t.Name.String() } type protobufNamer struct { - packages []*protobufPackage + packages []*protobufPackage + // The key here is a Go import-path. 
packagesByPath map[string]*protobufPackage } @@ -58,17 +64,9 @@ func (n *protobufNamer) Name(t *types.Type) string { return t.Name.String() } -func (n *protobufNamer) List() []generator.Package { - packages := make([]generator.Package, 0, len(n.packages)) - for i := range n.packages { - packages = append(packages, n.packages[i]) - } - return packages -} - func (n *protobufNamer) Add(p *protobufPackage) { - if _, ok := n.packagesByPath[p.PackagePath]; !ok { - n.packagesByPath[p.PackagePath] = p + if _, ok := n.packagesByPath[p.Path()]; !ok { + n.packagesByPath[p.Path()] = p n.packages = append(n.packages, p) } } @@ -77,7 +75,7 @@ func (n *protobufNamer) GoNameToProtoName(name types.Name) types.Name { if p, ok := n.packagesByPath[name.Package]; ok { return types.Name{ Name: name.Name, - Package: p.PackageName, + Package: p.Name(), Path: p.ImportPath(), } } @@ -85,7 +83,7 @@ func (n *protobufNamer) GoNameToProtoName(name types.Name) types.Name { if _, ok := p.FilterTypes[name]; ok { return types.Name{ Name: name.Name, - Package: p.PackageName, + Package: p.Name(), Path: p.ImportPath(), } } @@ -94,8 +92,8 @@ func (n *protobufNamer) GoNameToProtoName(name types.Name) types.Name { } func protoSafePackage(name string) string { - pkg := strings.Replace(name, "/", ".", -1) - return strings.Replace(pkg, "-", "_", -1) + pkg := strings.ReplaceAll(name, "/", ".") + return strings.ReplaceAll(pkg, "-", "_") } type typeNameSet map[types.Name]*protobufPackage @@ -116,7 +114,7 @@ func assignGoTypeToProtoPackage(p *protobufPackage, t *types.Type, local, global } return } - if t.Name.Package == p.PackagePath { + if t.Name.Package == p.Path() { // Associate types only to their own package global[t.Name] = p } @@ -182,7 +180,7 @@ func (n *protobufNamer) AssignTypesToPackages(c *generator.Context) error { optional := make(map[types.Name]struct{}) p.Imports = NewImportTracker(p.ProtoTypeName()) for _, t := range c.Order { - if t.Name.Package != p.PackagePath { + if t.Name.Package != p.Path() { continue } if !isTypeApplicableToProtobuf(t) { diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go index bed4c3e3..b31a7c4d 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/package.go @@ -25,41 +25,31 @@ import ( "reflect" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/types" ) -func newProtobufPackage(packagePath, packageName string, generateAll bool, omitFieldTypes map[types.Name]struct{}) *protobufPackage { +func newProtobufPackage(packagePath, packageDir, packageName string, generateAll bool, omitFieldTypes map[types.Name]struct{}) *protobufPackage { pkg := &protobufPackage{ - DefaultPackage: generator.DefaultPackage{ + SimpleTarget: generator.SimpleTarget{ // The protobuf package name (foo.bar.baz) - PackageName: packageName, - // A path segment relative to the GOPATH root (foo/bar/baz) - PackagePath: packagePath, - HeaderText: []byte( - ` -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -`), - PackageDocumentation: []byte(fmt.Sprintf( - `// Package %s is an autogenerated protobuf IDL. -`, packageName)), + PkgName: packageName, + PkgPath: packagePath, + PkgDir: packageDir, + HeaderComment: []byte("// This file was autogenerated by go-to-protobuf. 
Do not edit it manually!\n\n"), + PkgDocComment: []byte(fmt.Sprintf("// Package %s is an autogenerated protobuf IDL.\n", packageName)), }, GenerateAll: generateAll, OmitFieldTypes: omitFieldTypes, } pkg.FilterFunc = pkg.filterFunc - pkg.GeneratorFunc = pkg.generatorFunc + pkg.GeneratorsFunc = pkg.generatorsFunc return pkg } // protobufPackage contains the protobuf implementation of Package. type protobufPackage struct { - generator.DefaultPackage - - // If true, this package has been vendored into our source tree and thus can - // only be generated by changing the vendor tree. - Vendored bool + generator.SimpleTarget // If true, generate protobuf serializations for all public types. // If false, only generate protobuf serializations for structs that @@ -89,9 +79,9 @@ type protobufPackage struct { Imports *ImportTracker } -func (p *protobufPackage) Clean(outputBase string) error { +func (p *protobufPackage) Clean() error { for _, s := range []string{p.ImportPath(), p.OutputPath()} { - if err := os.Remove(filepath.Join(outputBase, s)); err != nil && !os.IsNotExist(err) { + if err := os.Remove(filepath.Join(p.Dir(), filepath.Base(s))); err != nil && !os.IsNotExist(err) { return err } } @@ -179,17 +169,17 @@ func (p *protobufPackage) ExtractGeneratedType(t *ast.TypeSpec) bool { return true } -func (p *protobufPackage) generatorFunc(c *generator.Context) []generator.Generator { +func (p *protobufPackage) generatorsFunc(c *generator.Context) []generator.Generator { generators := []generator.Generator{} p.Imports.AddNullable() generators = append(generators, &genProtoIDL{ - DefaultGen: generator.DefaultGen{ - OptionalName: "generated", + GoGenerator: generator.GoGenerator{ + OutputFilename: "generated", // the extension is added later }, - localPackage: types.Name{Package: p.PackageName, Path: p.PackagePath}, - localGoPackage: types.Name{Package: p.PackagePath, Name: p.GoPackageName()}, + localPackage: types.Name{Package: p.Name(), Path: p.Path()}, + localGoPackage: types.Name{Package: p.Path(), Name: p.GoPackageName()}, imports: p.Imports, generateAll: p.GenerateAll, omitGogo: p.OmitGogo, @@ -199,17 +189,17 @@ func (p *protobufPackage) generatorFunc(c *generator.Context) []generator.Genera } func (p *protobufPackage) GoPackageName() string { - return filepath.Base(p.PackagePath) + return filepath.Base(p.Path()) } func (p *protobufPackage) ImportPath() string { - return filepath.Join(p.PackagePath, "generated.proto") + return filepath.Join(p.Path(), "generated.proto") } func (p *protobufPackage) OutputPath() string { - return filepath.Join(p.PackagePath, "generated.pb.go") + return filepath.Join(p.Path(), "generated.pb.go") } var ( - _ = generator.Package(&protobufPackage{}) + _ = generator.Target(&protobufPackage{}) ) diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go index 1d27f958..32f9e0da 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/parser.go @@ -109,11 +109,9 @@ func RewriteGeneratedGogoProtobufFile(name string, extractFn ExtractFunc, option // as being "optional" (they may be nil on the wire). This allows protobuf to serialize a map or slice and // properly discriminate between empty and nil (which is not possible in protobuf). 
// TODO: move into upstream gogo-protobuf once https://github.com/gogo/protobuf/issues/181 -// -// has agreement +// has agreement func rewriteOptionalMethods(decl ast.Decl, isOptional OptionalFunc) { - switch t := decl.(type) { - case *ast.FuncDecl: + if t, ok := decl.(*ast.FuncDecl); ok { ident, ptr, ok := receiver(t) if !ok { return @@ -151,8 +149,7 @@ type optionalAssignmentVisitor struct { // Visit walks the provided node, transforming field initializations of the form // m.Field = &OptionalType{} -> m.Field = OptionalType{} func (v optionalAssignmentVisitor) Visit(n ast.Node) ast.Visitor { - switch t := n.(type) { - case *ast.AssignStmt: + if t, ok := n.(*ast.AssignStmt); ok { if len(t.Lhs) == 1 && len(t.Rhs) == 1 { if !isFieldSelector(t.Lhs[0], "m", "") { return nil @@ -196,13 +193,11 @@ func (v *optionalItemsVisitor) Visit(n ast.Node) ast.Visitor { t.Lhs[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} } } - switch rhs := t.Rhs[0].(type) { - case *ast.CallExpr: + if rhs, ok := t.Rhs[0].(*ast.CallExpr); ok { if ident, ok := rhs.Fun.(*ast.Ident); ok && ident.Name == "append" { ast.Walk(v, rhs) if len(rhs.Args) > 0 { - switch arg := rhs.Args[0].(type) { - case *ast.Ident: + if arg, ok := rhs.Args[0].(*ast.Ident); ok { if arg.Name == "m" { rhs.Args[0] = &ast.StarExpr{X: &ast.Ident{Name: "m"}} } @@ -213,8 +208,7 @@ func (v *optionalItemsVisitor) Visit(n ast.Node) ast.Visitor { } } case *ast.IfStmt: - switch cond := t.Cond.(type) { - case *ast.BinaryExpr: + if cond, ok := t.Cond.(*ast.BinaryExpr); ok { if cond.Op == token.EQL { if isFieldSelector(cond.X, "m", "Items") && isIdent(cond.Y, "nil") { cond.X = &ast.StarExpr{X: &ast.Ident{Name: "m"}} @@ -226,8 +220,7 @@ func (v *optionalItemsVisitor) Visit(n ast.Node) ast.Visitor { // if err := m[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { // return err // } - switch s := t.Init.(type) { - case *ast.AssignStmt: + if s, ok := t.Init.(*ast.AssignStmt); ok { if call, ok := s.Rhs[0].(*ast.CallExpr); ok { if sel, ok := call.Fun.(*ast.SelectorExpr); ok { if x, ok := sel.X.(*ast.IndexExpr); ok { @@ -303,15 +296,13 @@ func receiver(f *ast.FuncDecl) (ident *ast.Ident, pointer bool, ok bool) { // dropExistingTypeDeclarations removes any type declaration for which extractFn returns true. The function // returns true if the entire declaration should be dropped. func dropExistingTypeDeclarations(decl ast.Decl, extractFn ExtractFunc) bool { - switch t := decl.(type) { - case *ast.GenDecl: + if t, ok := decl.(*ast.GenDecl); ok { if t.Tok != token.TYPE { return false } specs := []ast.Spec{} for _, s := range t.Specs { - switch spec := s.(type) { - case *ast.TypeSpec: + if spec, ok := s.(*ast.TypeSpec); ok { if extractFn(spec) { continue } @@ -330,15 +321,13 @@ func dropExistingTypeDeclarations(decl ast.Decl, extractFn ExtractFunc) bool { // to prevent generation from being able to define side-effects. The function returns true // if the entire declaration should be dropped. 
func dropEmptyImportDeclarations(decl ast.Decl) bool { - switch t := decl.(type) { - case *ast.GenDecl: + if t, ok := decl.(*ast.GenDecl); ok { if t.Tok != token.IMPORT { return false } specs := []ast.Spec{} for _, s := range t.Specs { - switch spec := s.(type) { - case *ast.ImportSpec: + if spec, ok := s.(*ast.ImportSpec); ok { if spec.Name != nil && spec.Name.Name == "_" { continue } diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go index 6cfa3788..44ca07d1 100644 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go @@ -17,7 +17,7 @@ limitations under the License. package protobuf import ( - "k8s.io/gengo/types" + "k8s.io/gengo/v2" "k8s.io/klog/v2" ) @@ -25,7 +25,7 @@ import ( // it exists, the value is boolean. If the tag did not exist, it returns // false. func extractBoolTagOrDie(key string, lines []string) bool { - val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) + val, err := gengo.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { klog.Fatal(err) } diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore b/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore deleted file mode 100644 index a5c47b66..00000000 --- a/vendor/k8s.io/code-generator/cmd/import-boss/.gitignore +++ /dev/null @@ -1 +0,0 @@ -import-boss diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/README.md b/vendor/k8s.io/code-generator/cmd/import-boss/README.md deleted file mode 100644 index 88dc010e..00000000 --- a/vendor/k8s.io/code-generator/cmd/import-boss/README.md +++ /dev/null @@ -1,97 +0,0 @@ -## Purpose - -- `import-boss` enforces import restrictions against all pull requests submitted to the [k/k](https://github.com/kubernetes/kubernetes) repository. There are a number of `.import-restrictions` files that in the [k/k](https://github.com/kubernetes/kubernetes) repository, all of which are defined in `YAML` (or `JSON`) format. - -## How does it work? - -- When a directory is verified, `import-boss` looks for a file called `.import-restrictions`. If this file is not found, `import-boss` will go up to the parent directory until it finds this `.import-restrictions` file. - -- Adding `.import-restrictions` files does not add them to CI runs. They need to be explicitly added to `hack/verify-import-boss.sh`. Once an `.import-restrictions` file is added, all of the sub-packages of this file's directory are added as well. - -### What are Rules? - -- If an `.import-restrictions` file is found, then all imports of the package are checked against each `rule` in the file. A `rule` consists of three parts: - - A `SelectorRegexp`, to select the import paths that the rule applies to. - - A list of `AllowedPrefixes` - - A list of `ForbiddenPrefixes` - -- An import is allowed if it matches at least one allowed prefix and does not match any forbidden prefixes. An example `.import-restrictions` file looks like this: - -```json -{ - "Rules": [ - { - "SelectorRegexp": "k8s[.]io", - "AllowedPrefixes": [ - "k8s.io/gengo/examples", - "k8s.io/kubernetes/third_party" - ], - "ForbiddenPrefixes": [ - "k8s.io/kubernetes/pkg/third_party/deprecated" - ] - }, - { - "SelectorRegexp": "^unsafe$", - "AllowedPrefixes": [ - ], - "ForbiddenPrefixes": [ - "" - ] - } - ] -} -``` -- Take note of `"SelectorRegexp": "k8s[.]io"` in the first block. 
This specifies that we are applying these rules to the `"k8s.io"` import path. -- The second block explicitly matches the "unsafe" package, and forbids it ("" is a prefix of everything). - -### What are Inverse Rules? - -- In contrast to non-inverse rules, which are defined in importing packages, inverse rules are defined in imported packages. - -- Inverse rules allow for fine-grained import restrictions for "private packages" where we don't want to spread use inside of [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes). - -- If an `.import-restrictions` file is found, then all imports of the package are checked against each `inverse rule` in the file. This check will continue, climbing up the directory tree, until a match is found and accepted. - -- Inverse rules also have a boolean `transitive` option. When this option is true, the import rule is also applied to `transitive` imports. - - `transitive` imports are dependencies not directly depended on by the code, but are needed to run the application. Use this option if you want to apply restrictions to those indirect dependencies. - -```yaml -rules: - - selectorRegexp: k8s[.]io - allowedPrefixes: - - k8s.io/gengo/examples - - k8s.io/kubernetes/third_party - forbiddenPrefixes: - - k8s.io/kubernetes/pkg/third_party/deprecated - - selectorRegexp: ^unsafe$ - forbiddenPrefixes: - - "" -inverseRules: - - selectorRegexp: k8s[.]io - allowedPrefixes: - - k8s.io/same-repo - - k8s.io/kubernetes/pkg/legacy - forbiddenPrefixes: - - k8s.io/kubernetes/pkg/legacy/subpkg - - selectorRegexp: k8s[.]io - transitive: true - forbiddenPrefixes: - - k8s.io/kubernetes/cmd/kubelet - - k8s.io/kubernetes/cmd/kubectl -``` - -## How do I run import-boss within the k/k repo? - -- In order to include _test.go files, make sure to pass in the `include-test-files` flag: - ```sh - hack/verify-import-boss.sh --include-test-files=true - ``` - -- To include other directories, pass in a directory or directories using the `input-dirs` flag: - ```sh - hack/verify-import-boss.sh --input-dirs="k8s.io/kubernetes/test/e2e/framework/..." - ``` - -## Reference - -- [import-boss](https://github.com/kubernetes/gengo/tree/master/examples/import-boss) \ No newline at end of file diff --git a/vendor/k8s.io/code-generator/cmd/import-boss/main.go b/vendor/k8s.io/code-generator/cmd/import-boss/main.go deleted file mode 100644 index 34373c54..00000000 --- a/vendor/k8s.io/code-generator/cmd/import-boss/main.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// import-boss enforces import restrictions in a given repository. 
-package main - -import ( - "os" - - "github.com/spf13/pflag" - "k8s.io/gengo/args" - "k8s.io/gengo/examples/import-boss/generators" - - "k8s.io/klog/v2" -) - -func main() { - klog.InitFlags(nil) - arguments := args.Default() - - pflag.CommandLine.BoolVar(&arguments.IncludeTestFiles, "include-test-files", false, "If true, include *_test.go files.") - - if err := arguments.Execute( - generators.NameSystems(), - generators.DefaultNameSystem(), - generators.Packages, - ); err != nil { - klog.Errorf("Error: %v", err) - os.Exit(1) - } - klog.V(2).Info("Completed successfully.") -} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go index ffd073a8..8052578c 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/args/args.go @@ -18,18 +18,18 @@ package args import ( "fmt" - "path" "github.com/spf13/pflag" - codegenutil "k8s.io/code-generator/pkg/util" - "k8s.io/gengo/args" ) -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs struct { - VersionedClientSetPackage string - InternalClientSetPackage string - ListersPackage string +// Args is used by the gengo framework to pass args specific to this generator. +type Args struct { + OutputDir string // must be a directory path + OutputPkg string // must be a Go import-path + GoHeaderFile string + VersionedClientSetPackage string // must be a Go import-path + InternalClientSetPackage string // must be a Go import-path + ListersPackage string // must be a Go import-path SingleDirectory bool // PluralExceptions define a list of pluralizer exceptions in Type:PluralType format. @@ -37,47 +37,46 @@ type CustomArgs struct { PluralExceptions []string } -// NewDefaults returns default arguments for the generator. -func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - genericArgs := args.Default().WithoutDefaultFlagParsing() - customArgs := &CustomArgs{ - SingleDirectory: false, - PluralExceptions: []string{"Endpoints:Endpoints"}, +// New returns default arguments for the generator. +func New() *Args { + return &Args{ + SingleDirectory: false, } - genericArgs.CustomArgs = customArgs - - if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { - genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/informers") - customArgs.VersionedClientSetPackage = path.Join(pkg, "pkg/client/clientset/versioned") - customArgs.InternalClientSetPackage = path.Join(pkg, "pkg/client/clientset/internalversion") - customArgs.ListersPackage = path.Join(pkg, "pkg/client/listers") - } - - return genericArgs, customArgs } // AddFlags add the generator flags to the flag set. 
-func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { - fs.StringVar(&ca.InternalClientSetPackage, "internal-clientset-package", ca.InternalClientSetPackage, "the full package name for the internal clientset to use") - fs.StringVar(&ca.VersionedClientSetPackage, "versioned-clientset-package", ca.VersionedClientSetPackage, "the full package name for the versioned clientset to use") - fs.StringVar(&ca.ListersPackage, "listers-package", ca.ListersPackage, "the full package name for the listers to use") - fs.BoolVar(&ca.SingleDirectory, "single-directory", ca.SingleDirectory, "if true, omit the intermediate \"internalversion\" and \"externalversions\" subdirectories") - fs.StringSliceVar(&ca.PluralExceptions, "plural-exceptions", ca.PluralExceptions, "list of comma separated plural exception definitions in Type:PluralizedType format") +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.OutputDir, "output-dir", "", + "the base directory under which to generate results") + fs.StringVar(&args.OutputPkg, "output-pkg", args.OutputPkg, + "the Go import-path of the generated results") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") + fs.StringVar(&args.InternalClientSetPackage, "internal-clientset-package", args.InternalClientSetPackage, + "the Go import-path of the internal clientset to use") + fs.StringVar(&args.VersionedClientSetPackage, "versioned-clientset-package", args.VersionedClientSetPackage, + "the Go import-path of the versioned clientset to use") + fs.StringVar(&args.ListersPackage, "listers-package", args.ListersPackage, + "the Go import-path of the listers to use") + fs.BoolVar(&args.SingleDirectory, "single-directory", args.SingleDirectory, + "if true, omit the intermediate \"internalversion\" and \"externalversions\" subdirectories") + fs.StringSliceVar(&args.PluralExceptions, "plural-exceptions", args.PluralExceptions, + "list of comma separated plural exception definitions in Type:PluralizedType format") } // Validate checks the given arguments. 
-func Validate(genericArgs *args.GeneratorArgs) error { - customArgs := genericArgs.CustomArgs.(*CustomArgs) - - if len(genericArgs.OutputPackagePath) == 0 { - return fmt.Errorf("output package cannot be empty") +func (args *Args) Validate() error { + if len(args.OutputDir) == 0 { + return fmt.Errorf("--output-dir must be specified") } - if len(customArgs.VersionedClientSetPackage) == 0 { - return fmt.Errorf("versioned clientset package cannot be empty") + if len(args.OutputPkg) == 0 { + return fmt.Errorf("--output-pkg must be specified") } - if len(customArgs.ListersPackage) == 0 { - return fmt.Errorf("listers package cannot be empty") + if len(args.VersionedClientSetPackage) == 0 { + return fmt.Errorf("--versioned-clientset-package must be specified") + } + if len(args.ListersPackage) == 0 { + return fmt.Errorf("--listers-package must be specified") } - return nil } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go index 080670f2..b27cfeb0 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -21,9 +21,9 @@ import ( "path" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) @@ -31,7 +31,7 @@ import ( // factoryGenerator produces a file of listers for a given GroupVersion and // type. type factoryGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string imports namer.ImportTracker groupVersions map[string]clientgentypes.GroupVersions @@ -75,6 +75,7 @@ func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w i } m := map[string]interface{}{ "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), + "cacheTransformFunc": c.Universe.Type(cacheTransformFunc), "groupVersions": g.groupVersions, "gvInterfaces": gvInterfaces, "gvNewFuncs": gvNewFuncs, @@ -109,6 +110,7 @@ type sharedInformerFactory struct { lock {{.syncMutex|raw}} defaultResync {{.timeDuration|raw}} customResync map[{{.reflectType|raw}}]{{.timeDuration|raw}} + transform {{.cacheTransformFunc|raw}} informers map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}} // startedInformers is used for tracking which informers have been started. @@ -147,6 +149,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform {{.cacheTransformFunc|raw}}) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -234,7 +244,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. 
func (f *sharedInformerFactory) InformerFor(obj {{.runtimeObject|raw}}, newFunc {{.interfacesNewInformerFunc|raw}}) {{.cacheSharedIndexInformer|raw}} { f.lock.Lock() @@ -252,11 +262,11 @@ func (f *sharedInformerFactory) InformerFor(obj {{.runtimeObject|raw}}, newFunc } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer } - ` var sharedInformerFactoryInterface = ` @@ -289,7 +299,8 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. - Start(stopCh <-chan struct{}) + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. + Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new // informers can be started anymore and Start will return without @@ -310,7 +321,7 @@ type SharedInformerFactory interface { // ForResource gives generic access to a shared informer of the matching type. ForResource(resource {{.schemaGroupVersionResource|raw}}) (GenericInformer, error) - // InternalInformerFor returns the SharedIndexInformer for obj using an internal + // InformerFor returns the SharedIndexInformer for obj using an internal // client. InformerFor(obj {{.runtimeObject|raw}}, newFunc {{.interfacesNewInformerFunc|raw}}) {{.cacheSharedIndexInformer|raw}} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go index 70826eba..278f8ed9 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go @@ -19,9 +19,9 @@ package generators import ( "io" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) @@ -29,7 +29,7 @@ import ( // factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for // informer registration type factoryInterfaceGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string imports namer.ImportTracker clientSetPackage string diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go index a5a42953..d8e8873a 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/generic.go @@ -23,14 +23,14 @@ import ( clientgentypes "k8s.io/code-generator/cmd/client-gen/types" codegennamer "k8s.io/code-generator/pkg/namer" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) // genericGenerator generates the generic informer. 
type genericGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string imports namer.ImportTracker groupVersions map[string]clientgentypes.GroupVersions diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go index 0bba93c4..5342e25d 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/groupinterface.go @@ -18,18 +18,18 @@ package generators import ( "io" - "path/filepath" + "path" "strings" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) // groupInterfaceGenerator generates the per-group interface file. type groupInterfaceGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string imports namer.ImportTracker groupVersions clientgentypes.GroupVersions @@ -70,7 +70,7 @@ func (g *groupInterfaceGenerator) GenerateType(c *generator.Context, t *types.Ty versions := make([]versionData, 0, len(g.groupVersions.Versions)) for _, version := range g.groupVersions.Versions { gv := clientgentypes.GroupVersion{Group: g.groupVersions.Group, Version: version.Version} - versionPackage := filepath.Join(g.outputPackage, strings.ToLower(gv.Version.NonEmpty())) + versionPackage := path.Join(g.outputPackage, strings.ToLower(gv.Version.NonEmpty())) iface := c.Universe.Type(types.Name{Package: versionPackage, Name: "Interface"}) versions = append(versions, versionData{ Name: namer.IC(version.Version.NonEmpty()), diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go index da00e6e6..9d786ea0 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/informer.go @@ -21,9 +21,9 @@ import ( "io" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" @@ -34,7 +34,7 @@ import ( // informerGenerator produces a file of listers for a given GroupVersion and // type. 
type informerGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string groupPkgName string groupVersion clientgentypes.GroupVersion diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/targets.go similarity index 56% rename from vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go rename to vendor/k8s.io/code-generator/cmd/informer-gen/generators/targets.go index dd2c9cce..65dc89d0 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/targets.go @@ -22,16 +22,15 @@ import ( "path/filepath" "strings" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - "k8s.io/klog/v2" - "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - informergenargs "k8s.io/code-generator/cmd/informer-gen/args" + "k8s.io/code-generator/cmd/informer-gen/args" genutil "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" + "k8s.io/klog/v2" ) // NameSystems returns the name system used by the generators in this package. @@ -85,37 +84,34 @@ func isInternal(m types.Member) bool { return !strings.Contains(m.Tags, "json") } -func packageForInternalInterfaces(base string) string { - return filepath.Join(base, "internalinterfaces") -} +const subdirForInternalInterfaces = "internalinterfaces" -// Packages makes the client package definition. -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +// GetTargets makes the client target definition. 
+func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, "", gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs) - if !ok { - klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) + internalVersionOutputDir := args.OutputDir + internalVersionOutputPkg := args.OutputPkg + externalVersionOutputDir := args.OutputDir + externalVersionOutputPkg := args.OutputPkg + if !args.SingleDirectory { + internalVersionOutputDir = filepath.Join(internalVersionOutputDir, "internalversion") + internalVersionOutputPkg = path.Join(internalVersionOutputPkg, "internalversion") + externalVersionOutputDir = filepath.Join(externalVersionOutputDir, "externalversions") + externalVersionOutputPkg = path.Join(externalVersionOutputPkg, "externalversions") } - internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) - externalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) - if !customArgs.SingleDirectory { - internalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "internalversion") - externalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "externalversions") - } - - var packageList generator.Packages + var targetList []generator.Target typesForGroupVersion := make(map[clientgentypes.GroupVersion][]*types.Type) externalGroupVersions := make(map[string]clientgentypes.GroupVersions) internalGroupVersions := make(map[string]clientgentypes.GroupVersions) groupGoNames := make(map[string]string) - for _, inputDir := range arguments.InputDirs { - p := context.Universe.Package(genutil.Vendorless(inputDir)) + for _, inputPkg := range context.Inputs { + p := context.Universe.Package(inputPkg) objectMeta, internal, err := objectMetaForPackage(p) if err != nil { @@ -148,14 +144,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // If there's a comment of the form "// +groupName=somegroup" or // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the // group when generating. - if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + if override := gengo.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { gv.Group = clientgentypes.Group(override[0]) } // If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as // the Go group identifier in CamelCase. 
It defaults groupGoNames[groupPackageName] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0]) - if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { + if override := gengo.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil { groupGoNames[groupPackageName] = namer.IC(override[0]) } @@ -191,57 +187,80 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat typesToGenerate = orderer.OrderTypes(typesToGenerate) if internal { - packageList = append(packageList, versionPackage(internalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.InternalClientSetPackage, customArgs.ListersPackage)) + targetList = append(targetList, + versionTarget( + internalVersionOutputDir, internalVersionOutputPkg, + groupPackageName, gv, groupGoNames[groupPackageName], + boilerplate, typesToGenerate, + args.InternalClientSetPackage, args.ListersPackage)) } else { - packageList = append(packageList, versionPackage(externalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.VersionedClientSetPackage, customArgs.ListersPackage)) + targetList = append(targetList, + versionTarget( + externalVersionOutputDir, externalVersionOutputPkg, + groupPackageName, gv, groupGoNames[groupPackageName], + boilerplate, typesToGenerate, + args.VersionedClientSetPackage, args.ListersPackage)) } } if len(externalGroupVersions) != 0 { - packageList = append(packageList, factoryInterfacePackage(externalVersionPackagePath, boilerplate, customArgs.VersionedClientSetPackage)) - packageList = append(packageList, factoryPackage(externalVersionPackagePath, boilerplate, groupGoNames, genutil.PluralExceptionListToMapOrDie(customArgs.PluralExceptions), externalGroupVersions, - customArgs.VersionedClientSetPackage, - typesForGroupVersion)) + targetList = append(targetList, + factoryInterfaceTarget( + externalVersionOutputDir, externalVersionOutputPkg, + boilerplate, args.VersionedClientSetPackage)) + targetList = append(targetList, + factoryTarget( + externalVersionOutputDir, externalVersionOutputPkg, + boilerplate, groupGoNames, genutil.PluralExceptionListToMapOrDie(args.PluralExceptions), + externalGroupVersions, args.VersionedClientSetPackage, typesForGroupVersion)) for _, gvs := range externalGroupVersions { - packageList = append(packageList, groupPackage(externalVersionPackagePath, gvs, boilerplate)) + targetList = append(targetList, + groupTarget(externalVersionOutputDir, externalVersionOutputPkg, gvs, boilerplate)) } } if len(internalGroupVersions) != 0 { - packageList = append(packageList, factoryInterfacePackage(internalVersionPackagePath, boilerplate, customArgs.InternalClientSetPackage)) - packageList = append(packageList, factoryPackage(internalVersionPackagePath, boilerplate, groupGoNames, genutil.PluralExceptionListToMapOrDie(customArgs.PluralExceptions), internalGroupVersions, customArgs.InternalClientSetPackage, typesForGroupVersion)) + targetList = append(targetList, + factoryInterfaceTarget(internalVersionOutputDir, internalVersionOutputPkg, boilerplate, args.InternalClientSetPackage)) + targetList = append(targetList, + factoryTarget( + internalVersionOutputDir, internalVersionOutputPkg, + boilerplate, groupGoNames, genutil.PluralExceptionListToMapOrDie(args.PluralExceptions), + internalGroupVersions, args.InternalClientSetPackage, typesForGroupVersion)) for _, gvs := range internalGroupVersions { - packageList = 
append(packageList, groupPackage(internalVersionPackagePath, gvs, boilerplate)) + targetList = append(targetList, + groupTarget(internalVersionOutputDir, internalVersionOutputPkg, gvs, boilerplate)) } } - return packageList + return targetList } -func factoryPackage(basePackage string, boilerplate []byte, groupGoNames, pluralExceptions map[string]string, groupVersions map[string]clientgentypes.GroupVersions, clientSetPackage string, - typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type) generator.Package { - return &generator.DefaultPackage{ - PackageName: filepath.Base(basePackage), - PackagePath: basePackage, - HeaderText: boilerplate, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { +func factoryTarget(outputDirBase, outputPkgBase string, boilerplate []byte, groupGoNames, pluralExceptions map[string]string, groupVersions map[string]clientgentypes.GroupVersions, clientSetPackage string, + typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type) generator.Target { + return &generator.SimpleTarget{ + PkgName: path.Base(outputDirBase), + PkgPath: outputPkgBase, + PkgDir: outputDirBase, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = append(generators, &factoryGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: "factory", + GoGenerator: generator.GoGenerator{ + OutputFilename: "factory.go", }, - outputPackage: basePackage, + outputPackage: outputPkgBase, imports: generator.NewImportTracker(), groupVersions: groupVersions, clientSetPackage: clientSetPackage, - internalInterfacesPackage: packageForInternalInterfaces(basePackage), + internalInterfacesPackage: path.Join(outputPkgBase, subdirForInternalInterfaces), gvGoNames: groupGoNames, }) generators = append(generators, &genericGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: "generic", + GoGenerator: generator.GoGenerator{ + OutputFilename: "generic.go", }, - outputPackage: basePackage, + outputPackage: outputPkgBase, imports: generator.NewImportTracker(), groupVersions: groupVersions, pluralExceptions: pluralExceptions, @@ -254,19 +273,21 @@ func factoryPackage(basePackage string, boilerplate []byte, groupGoNames, plural } } -func factoryInterfacePackage(basePackage string, boilerplate []byte, clientSetPackage string) generator.Package { - packagePath := packageForInternalInterfaces(basePackage) +func factoryInterfaceTarget(outputDirBase, outputPkgBase string, boilerplate []byte, clientSetPackage string) generator.Target { + outputDir := filepath.Join(outputDirBase, subdirForInternalInterfaces) + outputPkg := path.Join(outputPkgBase, subdirForInternalInterfaces) - return &generator.DefaultPackage{ - PackageName: filepath.Base(packagePath), - PackagePath: packagePath, - HeaderText: boilerplate, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return &generator.SimpleTarget{ + PkgName: path.Base(outputDir), + PkgPath: outputPkg, + PkgDir: outputDir, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = append(generators, &factoryInterfaceGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: "factory_interfaces", + GoGenerator: generator.GoGenerator{ + OutputFilename: "factory_interfaces.go", }, - outputPackage: packagePath, + outputPackage: outputPkg, imports: generator.NewImportTracker(), clientSetPackage: clientSetPackage, }) @@ -276,23 +297,25 @@ func 
factoryInterfacePackage(basePackage string, boilerplate []byte, clientSetPa } } -func groupPackage(basePackage string, groupVersions clientgentypes.GroupVersions, boilerplate []byte) generator.Package { - packagePath := filepath.Join(basePackage, groupVersions.PackageName) +func groupTarget(outputDirBase, outputPackageBase string, groupVersions clientgentypes.GroupVersions, boilerplate []byte) generator.Target { + outputDir := filepath.Join(outputDirBase, groupVersions.PackageName) + outputPkg := path.Join(outputPackageBase, groupVersions.PackageName) groupPkgName := strings.Split(string(groupVersions.PackageName), ".")[0] - return &generator.DefaultPackage{ - PackageName: groupPkgName, - PackagePath: packagePath, - HeaderText: boilerplate, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return &generator.SimpleTarget{ + PkgName: groupPkgName, + PkgPath: outputPkg, + PkgDir: outputDir, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = append(generators, &groupInterfaceGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: "interface", + GoGenerator: generator.GoGenerator{ + OutputFilename: "interface.go", }, - outputPackage: packagePath, + outputPackage: outputPkg, groupVersions: groupVersions, imports: generator.NewImportTracker(), - internalInterfacesPackage: packageForInternalInterfaces(basePackage), + internalInterfacesPackage: path.Join(outputPackageBase, subdirForInternalInterfaces), }) return generators }, @@ -303,30 +326,33 @@ func groupPackage(basePackage string, groupVersions clientgentypes.GroupVersions } } -func versionPackage(basePackage string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, clientSetPackage, listersPackage string) generator.Package { - packagePath := filepath.Join(basePackage, groupPkgName, strings.ToLower(gv.Version.NonEmpty())) - - return &generator.DefaultPackage{ - PackageName: strings.ToLower(gv.Version.NonEmpty()), - PackagePath: packagePath, - HeaderText: boilerplate, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { +func versionTarget(outputDirBase, outputPkgBase string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, clientSetPackage, listersPackage string) generator.Target { + subdir := []string{groupPkgName, strings.ToLower(gv.Version.NonEmpty())} + outputDir := filepath.Join(outputDirBase, filepath.Join(subdir...)) + outputPkg := path.Join(outputPkgBase, path.Join(subdir...)) + + return &generator.SimpleTarget{ + PkgName: strings.ToLower(gv.Version.NonEmpty()), + PkgPath: outputPkg, + PkgDir: outputDir, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = append(generators, &versionInterfaceGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: "interface", + GoGenerator: generator.GoGenerator{ + OutputFilename: "interface.go", }, - outputPackage: packagePath, + outputPackage: outputPkg, imports: generator.NewImportTracker(), types: typesToGenerate, - internalInterfacesPackage: packageForInternalInterfaces(basePackage), + internalInterfacesPackage: path.Join(outputPkgBase, subdirForInternalInterfaces), }) for _, t := range typesToGenerate { generators = append(generators, &informerGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: strings.ToLower(t.Name.Name), 
+ GoGenerator: generator.GoGenerator{ + OutputFilename: strings.ToLower(t.Name.Name) + ".go", }, - outputPackage: packagePath, + outputPackage: outputPkg, groupPkgName: groupPkgName, groupVersion: gv, groupGoName: groupGoName, @@ -334,7 +360,7 @@ func versionPackage(basePackage string, groupPkgName string, gv clientgentypes.G imports: generator.NewImportTracker(), clientSetPackage: clientSetPackage, listersPackage: listersPackage, - internalInterfacesPackage: packageForInternalInterfaces(basePackage), + internalInterfacesPackage: path.Join(outputPkgBase, subdirForInternalInterfaces), }) } return generators diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go index 27d4bd51..4ca511ea 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go @@ -16,7 +16,7 @@ limitations under the License. package generators -import "k8s.io/gengo/types" +import "k8s.io/gengo/v2/types" var ( apiScheme = types.Name{Package: "k8s.io/kubernetes/pkg/api/legacyscheme", Name: "Scheme"} @@ -28,6 +28,7 @@ var ( cacheNewGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewGenericLister"} cacheNewSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewSharedIndexInformer"} cacheSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "SharedIndexInformer"} + cacheTransformFunc = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "TransformFunc"} listOptions = types.Name{Package: "k8s.io/kubernetes/pkg/apis/core", Name: "ListOptions"} reflectType = types.Name{Package: "reflect", Name: "Type"} runtimeObject = types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Object"} diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go index 3b51f8dc..5f9a0c21 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/versioninterface.go @@ -19,16 +19,16 @@ package generators import ( "io" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/code-generator/cmd/client-gen/generators/util" ) // versionInterfaceGenerator generates the per-version interface file. type versionInterfaceGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string imports namer.ImportTracker types []*types.Type diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go index 1247c35b..b0fc4851 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/main.go @@ -20,39 +20,38 @@ import ( "flag" "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/informer-gen/args" "k8s.io/code-generator/cmd/informer-gen/generators" "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" "k8s.io/klog/v2" - - generatorargs "k8s.io/code-generator/cmd/informer-gen/args" ) func main() { klog.InitFlags(nil) - genericArgs, customArgs := generatorargs.NewDefaults() - - // Override defaults. 
- // TODO: move out of informer-gen - genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated" - customArgs.VersionedClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" - customArgs.InternalClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - customArgs.ListersPackage = "k8s.io/kubernetes/pkg/client/listers" + args := args.New() - genericArgs.AddFlags(pflag.CommandLine) - customArgs.AddFlags(pflag.CommandLine) + args.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { klog.Fatalf("Error: %v", err) } + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + // Run it. - if err := genericArgs.Execute( - generators.NameSystems(util.PluralExceptionListToMapOrDie(customArgs.PluralExceptions)), + if err := gengo.Execute( + generators.NameSystems(util.PluralExceptionListToMapOrDie(args.PluralExceptions)), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) } diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go index 17033450..e6b9e00a 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/args/args.go @@ -18,47 +18,45 @@ package args import ( "fmt" - "path" "github.com/spf13/pflag" - codegenutil "k8s.io/code-generator/pkg/util" - "k8s.io/gengo/args" ) -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs struct { +// Args is used by the gengo framework to pass args specific to this generator. +type Args struct { + OutputDir string // must be a directory path + OutputPkg string // must be a Go import-path + GoHeaderFile string + // PluralExceptions specify list of exceptions used when pluralizing certain types. // For example 'Endpoints:Endpoints', otherwise the pluralizer will generate 'Endpointes'. PluralExceptions []string } -// NewDefaults returns default arguments for the generator. -func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - genericArgs := args.Default().WithoutDefaultFlagParsing() - customArgs := &CustomArgs{ - PluralExceptions: []string{"Endpoints:Endpoints"}, - } - genericArgs.CustomArgs = customArgs - - if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 { - genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/listers") - } - - return genericArgs, customArgs +// New returns default arguments for the generator. +func New() *Args { + return &Args{} } // AddFlags add the generator flags to the flag set. 
-func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) { - fs.StringSliceVar(&ca.PluralExceptions, "plural-exceptions", ca.PluralExceptions, "list of comma separated plural exception definitions in Type:PluralizedType format") +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.OutputDir, "output-dir", "", + "the base directory under which to generate results") + fs.StringVar(&args.OutputPkg, "output-pkg", "", + "the base Go import-path under which to generate results") + fs.StringSliceVar(&args.PluralExceptions, "plural-exceptions", args.PluralExceptions, + "list of comma separated plural exception definitions in Type:PluralizedType format") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") } // Validate checks the given arguments. -func Validate(genericArgs *args.GeneratorArgs) error { - _ = genericArgs.CustomArgs.(*CustomArgs) - - if len(genericArgs.OutputPackagePath) == 0 { - return fmt.Errorf("output package cannot be empty") +func (args *Args) Validate() error { + if len(args.OutputDir) == 0 { + return fmt.Errorf("--output-dir must be specified") + } + if len(args.OutputPkg) == 0 { + return fmt.Errorf("--output-pkg must be specified") } - return nil } diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go index dd45d774..4755f2ed 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/expansion.go @@ -22,17 +22,18 @@ import ( "path/filepath" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/types" + "k8s.io/klog/v2" "k8s.io/code-generator/cmd/client-gen/generators/util" ) // expansionGenerator produces a file for a expansion interfaces. type expansionGenerator struct { - generator.DefaultGen - packagePath string - types []*types.Type + generator.GoGenerator + outputPath string + types []*types.Type } // We only want to call GenerateType() once per group. 
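Note on the flag rewrite above: with gengo/v2 the generator binaries no longer derive an output location from GOPATH; callers pass --output-dir and --output-pkg explicitly, and the input packages are given as positional arguments (they end up in pflag.Args() and are handed to gengo.Execute). A minimal, hypothetical lister-gen invocation under the new scheme could look like the sketch below; the project paths are illustrative and not part of this change.

    # Illustrative only: generate listers for one API group/version.
    # --output-dir is a local directory, --output-pkg is its Go import path,
    # and the trailing argument is the input package containing the API types.
    lister-gen \
        --go-header-file ./hack/boilerplate.go.txt \
        --output-dir ./pkg/generated/listers \
        --output-pkg github.com/example/project/pkg/generated/listers \
        --plural-exceptions "Endpoints:Endpoints" \
        github.com/example/project/pkg/apis/foo/v1
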
@@ -44,11 +45,16 @@ func (g *expansionGenerator) GenerateType(c *generator.Context, t *types.Type, w sw := generator.NewSnippetWriter(w, c, "$", "$") for _, t := range g.types { tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) - if _, err := os.Stat(filepath.Join(g.packagePath, strings.ToLower(t.Name.Name+"_expansion.go"))); os.IsNotExist(err) { + manualFile := filepath.Join(g.outputPath, strings.ToLower(t.Name.Name+"_expansion.go")) + if _, err := os.Stat(manualFile); err == nil { + klog.V(4).Infof("file %q exists, not generating", manualFile) + } else if os.IsNotExist(err) { sw.Do(expansionInterfaceTemplate, t) if !tags.NonNamespaced { sw.Do(namespacedExpansionInterfaceTemplate, t) } + } else { + return err } } return sw.Error() diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go index 8ada4946..8955e076 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go @@ -19,17 +19,17 @@ package generators import ( "fmt" "io" + "path" "path/filepath" "strings" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - + "k8s.io/code-generator/cmd/lister-gen/args" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) @@ -59,16 +59,16 @@ func DefaultNameSystem() string { return "public" } -// Packages makes the client package definition. -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +// GetTargets makes the client target definition. +func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, "", gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - var packageList generator.Packages - for _, inputDir := range arguments.InputDirs { - p := context.Universe.Package(inputDir) + var targetList []generator.Target + for _, inputPkg := range context.Inputs { + p := context.Universe.Package(inputPkg) objectMeta, internal, err := objectMetaForPackage(p) if err != nil { @@ -101,7 +101,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // If there's a comment of the form "// +groupName=somegroup" or // "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the // group when generating. 
- if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { + if override := gengo.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil { gv.Group = clientgentypes.Group(strings.SplitN(override[0], ".", 2)[0]) } @@ -119,26 +119,33 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)} typesToGenerate = orderer.OrderTypes(typesToGenerate) - packagePath := filepath.Join(arguments.OutputPackagePath, groupPackageName, strings.ToLower(gv.Version.NonEmpty())) - packageList = append(packageList, &generator.DefaultPackage{ - PackageName: strings.ToLower(gv.Version.NonEmpty()), - PackagePath: packagePath, - HeaderText: boilerplate, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + subdir := []string{groupPackageName, strings.ToLower(gv.Version.NonEmpty())} + outputDir := filepath.Join(args.OutputDir, filepath.Join(subdir...)) + outputPkg := path.Join(args.OutputPkg, path.Join(subdir...)) + targetList = append(targetList, &generator.SimpleTarget{ + PkgName: strings.ToLower(gv.Version.NonEmpty()), + PkgPath: outputPkg, + PkgDir: outputDir, + HeaderComment: boilerplate, + FilterFunc: func(c *generator.Context, t *types.Type) bool { + tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) + return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("get") + }, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { generators = append(generators, &expansionGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: "expansion_generated", + GoGenerator: generator.GoGenerator{ + OutputFilename: "expansion_generated.go", }, - packagePath: filepath.Join(arguments.OutputBase, packagePath), - types: typesToGenerate, + outputPath: outputDir, + types: typesToGenerate, }) for _, t := range typesToGenerate { generators = append(generators, &listerGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: strings.ToLower(t.Name.Name), + GoGenerator: generator.GoGenerator{ + OutputFilename: strings.ToLower(t.Name.Name) + ".go", }, - outputPackage: arguments.OutputPackagePath, + outputPackage: outputPkg, groupVersion: gv, internalGVPkg: internalGVPkg, typeToGenerate: t, @@ -148,14 +155,10 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } return generators }, - FilterFunc: func(c *generator.Context, t *types.Type) bool { - tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)) - return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("get") - }, }) } - return packageList + return targetList } // objectMetaForPackage returns the type of ObjectMeta used by package p. @@ -187,7 +190,7 @@ func isInternal(m types.Member) bool { // listerGenerator produces a file of listers for a given GroupVersion and // type. type listerGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string groupVersion clientgentypes.GroupVersion internalGVPkg string @@ -212,6 +215,7 @@ func (g *listerGenerator) Imports(c *generator.Context) (imports []string) { imports = append(imports, g.imports.ImportLines()...) 
imports = append(imports, "k8s.io/apimachinery/pkg/api/errors") imports = append(imports, "k8s.io/apimachinery/pkg/labels") + imports = append(imports, "k8s.io/client-go/listers") // for Indexer imports = append(imports, "k8s.io/client-go/tools/cache") return @@ -233,25 +237,21 @@ func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io } if tags.NonNamespaced { - sw.Do(typeListerInterface_NonNamespaced, m) + sw.Do(typeListerInterfaceNonNamespaced, m) } else { sw.Do(typeListerInterface, m) } sw.Do(typeListerStruct, m) sw.Do(typeListerConstructor, m) - sw.Do(typeLister_List, m) if tags.NonNamespaced { - sw.Do(typeLister_NonNamespacedGet, m) return sw.Error() } - sw.Do(typeLister_NamespaceLister, m) + sw.Do(typeListerNamespaceLister, m) sw.Do(namespaceListerInterface, m) sw.Do(namespaceListerStruct, m) - sw.Do(namespaceLister_List, m) - sw.Do(namespaceLister_Get, m) return sw.Error() } @@ -269,7 +269,7 @@ type $.type|public$Lister interface { } ` -var typeListerInterface_NonNamespaced = ` +var typeListerInterfaceNonNamespaced = ` // $.type|public$Lister helps list $.type|publicPlural$. // All objects returned here must be treated as read-only. type $.type|public$Lister interface { @@ -283,48 +283,27 @@ type $.type|public$Lister interface { } ` +// This embeds a typed resource indexer instead of aliasing, so that the struct +// is available as a receiver for methods specific to the generated type +// (from the corresponding expansion interface). var typeListerStruct = ` // $.type|private$Lister implements the $.type|public$Lister interface. type $.type|private$Lister struct { - indexer cache.Indexer + listers.ResourceIndexer[*$.type|raw$] } ` var typeListerConstructor = ` // New$.type|public$Lister returns a new $.type|public$Lister. func New$.type|public$Lister(indexer cache.Indexer) $.type|public$Lister { - return &$.type|private$Lister{indexer: indexer} -} -` - -var typeLister_List = ` -// List lists all $.type|publicPlural$ in the indexer. -func (s *$.type|private$Lister) List(selector labels.Selector) (ret []*$.type|raw$, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*$.type|raw$)) - }) - return ret, err + return &$.type|private$Lister{listers.New[*$.type|raw$](indexer, $.Resource|raw$("$.type|lowercaseSingular$"))} } ` -var typeLister_NamespaceLister = ` +var typeListerNamespaceLister = ` // $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$. func (s *$.type|private$Lister) $.type|publicPlural$(namespace string) $.type|public$NamespaceLister { - return $.type|private$NamespaceLister{indexer: s.indexer, namespace: namespace} -} -` - -var typeLister_NonNamespacedGet = ` -// Get retrieves the $.type|public$ from the index for a given name. -func (s *$.type|private$Lister) Get(name string) (*$.type|raw$, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name) - } - return obj.(*$.type|raw$), nil + return $.type|private$NamespaceLister{listers.NewNamespaced[*$.type|raw$](s.ResourceIndexer, namespace)} } ` @@ -342,35 +321,13 @@ type $.type|public$NamespaceLister interface { } ` +// This embeds a typed namespaced resource indexer instead of aliasing, so that the struct +// is available as a receiver for methods specific to the generated type +// (from the corresponding expansion interface). 
var namespaceListerStruct = ` // $.type|private$NamespaceLister implements the $.type|public$NamespaceLister // interface. type $.type|private$NamespaceLister struct { - indexer cache.Indexer - namespace string -} -` - -var namespaceLister_List = ` -// List lists all $.type|publicPlural$ in the indexer for a given namespace. -func (s $.type|private$NamespaceLister) List(selector labels.Selector) (ret []*$.type|raw$, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*$.type|raw$)) - }) - return ret, err -} -` - -var namespaceLister_Get = ` -// Get retrieves the $.type|public$ from the indexer for a given namespace and name. -func (s $.type|private$NamespaceLister) Get(name string) (*$.type|raw$, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name) - } - return obj.(*$.type|raw$), nil + listers.ResourceIndexer[*$.type|raw$] } ` diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go index a7d7b610..8dde18ba 100644 --- a/vendor/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/lister-gen/main.go @@ -20,36 +20,38 @@ import ( "flag" "github.com/spf13/pflag" + "k8s.io/code-generator/cmd/lister-gen/args" "k8s.io/code-generator/cmd/lister-gen/generators" "k8s.io/code-generator/pkg/util" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" "k8s.io/klog/v2" - - generatorargs "k8s.io/code-generator/cmd/lister-gen/args" ) func main() { klog.InitFlags(nil) - genericArgs, customArgs := generatorargs.NewDefaults() - - // Override defaults. - // TODO: move this out of lister-gen - genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers" + args := args.New() - genericArgs.AddFlags(pflag.CommandLine) - customArgs.AddFlags(pflag.CommandLine) + args.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { klog.Fatalf("Error: %v", err) } + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + // Run it. - if err := genericArgs.Execute( - generators.NameSystems(util.PluralExceptionListToMapOrDie(customArgs.PluralExceptions)), + if err := gengo.Execute( + generators.NameSystems(util.PluralExceptionListToMapOrDie(args.PluralExceptions)), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) } diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go b/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go index 2e3ab084..cc1fdafc 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/args/args.go @@ -19,19 +19,30 @@ package args import ( "fmt" - "k8s.io/gengo/args" + "github.com/spf13/pflag" ) -// NewDefaults returns default arguments for the generator. -func NewDefaults() *args.GeneratorArgs { - genericArgs := args.Default().WithoutDefaultFlagParsing() - genericArgs.OutputFileBaseName = "zz_generated.register" - return genericArgs +type Args struct { + OutputFile string + GoHeaderFile string +} + +// New returns default arguments for the generator. 
+func New() *Args { + return &Args{} +} + +// AddFlags add the generator flags to the flag set. +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.OutputFile, "output-file", "generated.register.go", + "the name of the file to be generated") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") } // Validate checks the given arguments. -func Validate(genericArgs *args.GeneratorArgs) error { - if len(genericArgs.OutputFileBaseName) == 0 { +func (args *Args) Validate() error { + if len(args.OutputFile) == 0 { return fmt.Errorf("output file base name cannot be empty") } diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go index c831c575..7eb7793e 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/register_external.go @@ -21,13 +21,13 @@ import ( "sort" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) type registerExternalGenerator struct { - generator.DefaultGen + generator.GoGenerator outputPackage string gv clientgentypes.GroupVersion typesToGenerate []*types.Type @@ -91,7 +91,7 @@ var ( // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder - // Depreciated: use Install instead + // Deprecated: use Install instead AddToScheme = localSchemeBuilder.AddToScheme Install = localSchemeBuilder.AddToScheme ) diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/targets.go similarity index 73% rename from vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go rename to vendor/k8s.io/code-generator/cmd/register-gen/generators/targets.go index 242eb3aa..d2998261 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/targets.go @@ -25,10 +25,11 @@ import ( "k8s.io/klog/v2" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/code-generator/cmd/register-gen/args" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) // NameSystems returns the name system used by the generators in this package. @@ -42,29 +43,29 @@ func DefaultNameSystem() string { return "public" } -// Packages makes packages to generate. -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +// GetTargets makes targets to generate. 
+func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - packages := generator.Packages{} - for _, inputDir := range arguments.InputDirs { - pkg := context.Universe.Package(inputDir) + targets := []generator.Target{} + for _, input := range context.Inputs { + pkg := context.Universe.Package(input) internal, err := isInternal(pkg) if err != nil { - klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) + klog.V(5).Infof("skipping the generation of %s file, due to err %v", args.OutputFile, err) continue } if internal { - klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) + klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", args.OutputFile, pkg.Name) continue } registerFileName := "register.go" - searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName) + searchPath := path.Join(pkg.Dir, registerFileName) if _, err := os.Stat(path.Join(searchPath)); err == nil { - klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) + klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", args.OutputFile, registerFileName, searchPath) continue } else if err != nil && !os.IsNotExist(err) { klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) @@ -82,7 +83,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // if there is a comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", // extract the fully qualified API group name from it and overwrite the group inferred from the package path - if override := types.ExtractCommentTags("+", pkg.Comments)["groupName"]; override != nil { + if override := gengo.ExtractCommentTags("+", pkg.Comments)["groupName"]; override != nil { groupName := override[0] klog.V(5).Infof("overriding the group name with = %s", groupName) gv.Group = clientgentypes.Group(groupName) @@ -93,22 +94,23 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat for _, t := range pkg.Types { klog.V(5).Infof("considering type = %s", t.Name.String()) for _, typeMember := range t.Members { - if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { + if typeMember.Name == "TypeMeta" && typeMember.Embedded { typesToRegister = append(typesToRegister, t) } } } - packages = append(packages, - &generator.DefaultPackage{ - PackageName: pkg.Name, - PackagePath: pkg.Path, - HeaderText: boilerplate, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + targets = append(targets, + &generator.SimpleTarget{ + PkgName: pkg.Name, + PkgPath: pkg.Path, // output to same pkg as input + PkgDir: pkg.Dir, // output to same pkg as input + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { return []generator.Generator{ ®isterExternalGenerator{ - DefaultGen: generator.DefaultGen{ - OptionalName: arguments.OutputFileBaseName, + GoGenerator: generator.GoGenerator{ + 
OutputFilename: args.OutputFile, }, gv: gv, typesToGenerate: typesToRegister, @@ -120,7 +122,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat }) } - return packages + return targets } // isInternal determines whether the given package diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/main.go b/vendor/k8s.io/code-generator/cmd/register-gen/main.go index dc291444..ac28f87f 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/main.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/main.go @@ -20,28 +20,35 @@ import ( "flag" "github.com/spf13/pflag" - "k8s.io/klog/v2" - - generatorargs "k8s.io/code-generator/cmd/register-gen/args" + "k8s.io/code-generator/cmd/register-gen/args" "k8s.io/code-generator/cmd/register-gen/generators" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/klog/v2" ) func main() { klog.InitFlags(nil) - genericArgs := generatorargs.NewDefaults() - genericArgs.AddFlags(pflag.CommandLine) + args := args.New() + args.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { klog.Fatalf("Error: %v", err) } - if err := genericArgs.Execute( + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + + if err := gengo.Execute( generators.NameSystems(), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + pflag.Args(), ); err != nil { klog.Fatalf("Error: %v", err) } diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore b/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore deleted file mode 100644 index ffe6458c..00000000 --- a/vendor/k8s.io/code-generator/cmd/set-gen/.gitignore +++ /dev/null @@ -1 +0,0 @@ -set-gen diff --git a/vendor/k8s.io/code-generator/cmd/set-gen/main.go b/vendor/k8s.io/code-generator/cmd/set-gen/main.go deleted file mode 100644 index 0968ce76..00000000 --- a/vendor/k8s.io/code-generator/cmd/set-gen/main.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// set-gen is an example usage of gengo. -// -// Structs in the input directories with the below line in their comments will -// have sets generated for them. -// // +genset -// -// Any builtin type referenced anywhere in the input directories will have a -// set generated for it. -package main - -import ( - "os" - - "k8s.io/gengo/args" - "k8s.io/gengo/examples/set-gen/generators" - - "k8s.io/klog/v2" -) - -func main() { - klog.InitFlags(nil) - arguments := args.Default() - - // Override defaults. 
- arguments.InputDirs = []string{"k8s.io/kubernetes/pkg/util/sets/types"} - arguments.OutputPackagePath = "k8s.io/apimachinery/pkg/util/sets" - - if err := arguments.Execute( - generators.NameSystems(), - generators.DefaultNameSystem(), - generators.Packages, - ); err != nil { - klog.Errorf("Error: %v", err) - os.Exit(1) - } - klog.V(2).Info("Completed successfully.") -} diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh index 223423b6..65b0abda 100644 --- a/vendor/k8s.io/code-generator/generate-groups.sh +++ b/vendor/k8s.io/code-generator/generate-groups.sh @@ -14,93 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -o errexit -set -o nounset -set -o pipefail - -# generate-groups generates everything for a project with external types only, e.g. a project based -# on CustomResourceDefinitions. - -if [ "$#" -lt 4 ] || [ "${1}" == "--help" ]; then - cat < ... - - the generators comma separated to run (deepcopy,defaulter,client,lister,informer) or "all". - the output package name (e.g. github.com/example/project/pkg/generated). - the external types dir (e.g. github.com/example/api or github.com/example/project/pkg/apis). - the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative - to . - ... arbitrary flags passed to all generator binaries. - - -Examples: - $(basename "$0") all github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" - $(basename "$0") deepcopy,client github.com/example/project/pkg/client github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" -EOF - exit 0 -fi - -GENS="$1" -OUTPUT_PKG="$2" -APIS_PKG="$3" -GROUPS_WITH_VERSIONS="$4" -shift 4 - -( - # To support running this script from anywhere, first cd into this directory, - # and then install with forced module mode on and fully qualified name. - cd "$(dirname "${0}")" - GO111MODULE=on go install k8s.io/code-generator/cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen} -) -# Go installs the above commands to get installed in $GOBIN if defined, and $GOPATH/bin otherwise: -GOBIN="$(go env GOBIN)" -gobin="${GOBIN:-$(go env GOPATH)/bin}" - -function codegen::join() { local IFS="$1"; shift; echo "$*"; } - -# enumerate group versions -FQ_APIS=() # e.g. 
k8s.io/api/apps/v1 -for GVs in ${GROUPS_WITH_VERSIONS}; do - IFS=: read -r G Vs <<<"${GVs}" - - # enumerate versions - for V in ${Vs//,/ }; do - FQ_APIS+=("${APIS_PKG}/${G}/${V}") - done -done - -if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then - echo "Generating deepcopy funcs" - "${gobin}/deepcopy-gen" \ - --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \ - -O zz_generated.deepcopy \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then - echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" - "${gobin}/client-gen" \ - --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" \ - --input-base "" \ - --input "$(codegen::join , "${FQ_APIS[@]}")" \ - --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then - echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" - "${gobin}/lister-gen" \ - --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \ - --output-package "${OUTPUT_PKG}/listers" \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then - echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" - "${gobin}/informer-gen" \ - --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \ - --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \ - --listers-package "${OUTPUT_PKG}/listers" \ - --output-package "${OUTPUT_PKG}/informers" \ - "$@" -fi +echo "ERROR: $(basename "$0") has been removed." +echo "ERROR: Please use k8s.io/code-generator/kube_codegen.sh instead." +echo +exit 1 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh index 5dfda879..65b0abda 100644 --- a/vendor/k8s.io/code-generator/generate-internal-groups.sh +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -14,133 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -o errexit -set -o nounset -set -o pipefail - -# generate-internal-groups generates everything for a project with internal types, e.g. an -# user-provided API server based on k8s.io/apiserver. - -if [ "$#" -lt 5 ] || [ "${1}" == "--help" ]; then - cat < ... - - the generators comma separated to run (deepcopy,defaulter,conversion,client,lister,informer,openapi) or "all". - the output package name (e.g. github.com/example/project/pkg/generated). - the internal types dir (e.g. github.com/example/project/pkg/apis). - the external types dir (e.g. github.com/example/project/pkg/apis or githubcom/example/apis). - the groups and their versions in the format "groupA:v1,v2 groupB:v1 groupC:v2", relative - to . - ... arbitrary flags passed to all generator binaries. - -Examples: - $(basename "$0") all github.com/example/project/pkg/client github.com/example/project/pkg/apis github.com/example/project/pkg/apis "foo:v1 bar:v1alpha1,v1beta1" - $(basename "$0") deepcopy,defaulter,conversion github.com/example/project/pkg/client github.com/example/project/pkg/apis github.com/example/project/apis "foo:v1 bar:v1alpha1,v1beta1" -EOF - exit 0 -fi - -GENS="$1" -OUTPUT_PKG="$2" -INT_APIS_PKG="$3" -EXT_APIS_PKG="$4" -GROUPS_WITH_VERSIONS="$5" -shift 5 - -( - # To support running this script from anywhere, first cd into this directory, - # and then install with forced module mode on and fully qualified name. 
- cd "$(dirname "${0}")" - GO111MODULE=on go install k8s.io/code-generator/cmd/{defaulter-gen,conversion-gen,client-gen,lister-gen,informer-gen,deepcopy-gen,openapi-gen} -) - -function codegen::join() { local IFS="$1"; shift; echo "$*"; } - -# enumerate group versions -ALL_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps k8s.io/api/apps/v1 -INT_FQ_APIS=() # e.g. k8s.io/kubernetes/pkg/apis/apps -EXT_FQ_APIS=() # e.g. k8s.io/api/apps/v1 -for GVs in ${GROUPS_WITH_VERSIONS}; do - IFS=: read -r G Vs <<<"${GVs}" - - if [ -n "${INT_APIS_PKG}" ]; then - ALL_FQ_APIS+=("${INT_APIS_PKG}/${G}") - INT_FQ_APIS+=("${INT_APIS_PKG}/${G}") - fi - - # enumerate versions - for V in ${Vs//,/ }; do - ALL_FQ_APIS+=("${EXT_APIS_PKG}/${G}/${V}") - EXT_FQ_APIS+=("${EXT_APIS_PKG}/${G}/${V}") - done -done - -if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then - echo "Generating deepcopy funcs" - "${GOPATH}/bin/deepcopy-gen" \ - --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.deepcopy \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "defaulter" <<<"${GENS}"; then - echo "Generating defaulters" - "${GOPATH}/bin/defaulter-gen" \ - --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}")" -O zz_generated.defaults \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "conversion" <<<"${GENS}"; then - echo "Generating conversions" - "${GOPATH}/bin/conversion-gen" \ - --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" -O zz_generated.conversion \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then - echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" - if [ -n "${INT_APIS_PKG}" ]; then - IFS=" " read -r -a APIS <<< "$(printf '%s/ ' "${INT_FQ_APIS[@]}")" - "${GOPATH}/bin/client-gen" \ - --clientset-name "${CLIENTSET_NAME_INTERNAL:-internalversion}" \ - --input-base "" \ - --input "$(codegen::join , "${APIS[@]}")" \ - --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" \ - "$@" - fi - "${GOPATH}/bin/client-gen" \ - --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" \ - --input-base "" \ - --input "$(codegen::join , "${EXT_FQ_APIS[@]}")" \ - --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}" \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then - echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" - "${GOPATH}/bin/lister-gen" \ - --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" \ - --output-package "${OUTPUT_PKG}/listers" \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then - echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" - "${GOPATH}/bin/informer-gen" \ - --input-dirs "$(codegen::join , "${ALL_FQ_APIS[@]}")" \ - --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned}" \ - --internal-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_INTERNAL:-internalversion}" \ - --listers-package "${OUTPUT_PKG}/listers" \ - --output-package "${OUTPUT_PKG}/informers" \ - "$@" -fi - -if [ "${GENS}" = "all" ] || grep -qw "openapi" <<<"${GENS}"; then - echo "Generating OpenAPI definitions for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/openapi" - declare -a OPENAPI_EXTRA_PACKAGES - "${GOPATH}/bin/openapi-gen" \ - --input-dirs "$(codegen::join , "${EXT_FQ_APIS[@]}" "${OPENAPI_EXTRA_PACKAGES[@]+"${OPENAPI_EXTRA_PACKAGES[@]}"}")" \ - --input-dirs 
"k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version" \ - --output-package "${OUTPUT_PKG}/openapi" \ - -O zz_generated.openapi \ - "$@" -fi +echo "ERROR: $(basename "$0") has been removed." +echo "ERROR: Please use k8s.io/code-generator/kube_codegen.sh instead." +echo +exit 1 diff --git a/vendor/k8s.io/code-generator/kube_codegen.sh b/vendor/k8s.io/code-generator/kube_codegen.sh new file mode 100644 index 00000000..f57c52ca --- /dev/null +++ b/vendor/k8s.io/code-generator/kube_codegen.sh @@ -0,0 +1,759 @@ +#!/usr/bin/env bash + +# Copyright 2023 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This presents several functions for packages which want to use kubernetes +# code-generation tools. + +# These functions insist that your input IDL (commented go) files be located in +# go packages following the pattern $input_pkg_root/$something_sans_slash/$api_version . +# Those $something_sans_slash will be propagated into the output directory structure. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_CODEGEN_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)" + +function kube::codegen::internal::findz() { + # We use `find` rather than `git ls-files` because sometimes external + # projects use this across repos. This is an imperfect wrapper of find, + # but good enough for this script. + find "$@" -print0 +} + +function kube::codegen::internal::grep() { + # We use `grep` rather than `git grep` because sometimes external projects + # use this across repos. + grep "$@" \ + --exclude-dir .git \ + --exclude-dir _output \ + --exclude-dir vendor +} + +# Generate tagged helper code: conversions, deepcopy, and defaults +# +# USAGE: kube::codegen::gen_helpers [FLAGS] +# +# +# The root directory under which to search for Go files which request code to +# be generated. This must be a local path, not a Go package. +# +# See note at the top about package structure below that. +# +# FLAGS: +# +# --boilerplate +# An optional override for the header file to insert into generated files. +# +# --extra-peer-dir +# An optional list (this flag may be specified multiple times) of "extra" +# directories to consider during conversion generation. 
+# +function kube::codegen::gen_helpers() { + local in_dir="" + local boilerplate="${KUBE_CODEGEN_ROOT}/hack/boilerplate.go.txt" + local v="${KUBE_VERBOSE:-0}" + local extra_peers=() + + while [ "$#" -gt 0 ]; do + case "$1" in + "--boilerplate") + boilerplate="$2" + shift 2 + ;; + "--extra-peer-dir") + extra_peers+=("$2") + shift 2 + ;; + *) + if [[ "$1" =~ ^-- ]]; then + echo "unknown argument: $1" >&2 + return 1 + fi + if [ -n "$in_dir" ]; then + echo "too many arguments: $1 (already have $in_dir)" >&2 + return 1 + fi + in_dir="$1" + shift + ;; + esac + done + + if [ -z "${in_dir}" ]; then + echo "input-dir argument is required" >&2 + return 1 + fi + + ( + # To support running this from anywhere, first cd into this directory, + # and then install with forced module mode on and fully qualified name. + cd "${KUBE_CODEGEN_ROOT}" + BINS=( + conversion-gen + deepcopy-gen + defaulter-gen + ) + # shellcheck disable=2046 # printf word-splitting is intentional + GO111MODULE=on go install $(printf "k8s.io/code-generator/cmd/%s " "${BINS[@]}") + ) + # Go installs in $GOBIN if defined, and $GOPATH/bin otherwise + gobin="${GOBIN:-$(go env GOPATH)/bin}" + + # Deepcopy + # + local input_pkgs=() + while read -r dir; do + pkg="$(cd "${dir}" && GO111MODULE=on go list -find .)" + input_pkgs+=("${pkg}") + done < <( + ( kube::codegen::internal::grep -l --null \ + -e '^\s*//\s*+k8s:deepcopy-gen=' \ + -r "${in_dir}" \ + --include '*.go' \ + || true \ + ) | while read -r -d $'\0' F; do dirname "${F}"; done \ + | LC_ALL=C sort -u + ) + + if [ "${#input_pkgs[@]}" != 0 ]; then + echo "Generating deepcopy code for ${#input_pkgs[@]} targets" + + kube::codegen::internal::findz \ + "${in_dir}" \ + -type f \ + -name zz_generated.deepcopy.go \ + | xargs -0 rm -f + + "${gobin}/deepcopy-gen" \ + -v "${v}" \ + --output-file zz_generated.deepcopy.go \ + --go-header-file "${boilerplate}" \ + "${input_pkgs[@]}" + fi + + # Defaults + # + local input_pkgs=() + while read -r dir; do + pkg="$(cd "${dir}" && GO111MODULE=on go list -find .)" + input_pkgs+=("${pkg}") + done < <( + ( kube::codegen::internal::grep -l --null \ + -e '^\s*//\s*+k8s:defaulter-gen=' \ + -r "${in_dir}" \ + --include '*.go' \ + || true \ + ) | while read -r -d $'\0' F; do dirname "${F}"; done \ + | LC_ALL=C sort -u + ) + + if [ "${#input_pkgs[@]}" != 0 ]; then + echo "Generating defaulter code for ${#input_pkgs[@]} targets" + + kube::codegen::internal::findz \ + "${in_dir}" \ + -type f \ + -name zz_generated.defaults.go \ + | xargs -0 rm -f + + "${gobin}/defaulter-gen" \ + -v "${v}" \ + --output-file zz_generated.defaults.go \ + --go-header-file "${boilerplate}" \ + "${input_pkgs[@]}" + fi + + # Conversions + # + local input_pkgs=() + while read -r dir; do + pkg="$(cd "${dir}" && GO111MODULE=on go list -find .)" + input_pkgs+=("${pkg}") + done < <( + ( kube::codegen::internal::grep -l --null \ + -e '^\s*//\s*+k8s:conversion-gen=' \ + -r "${in_dir}" \ + --include '*.go' \ + || true \ + ) | while read -r -d $'\0' F; do dirname "${F}"; done \ + | LC_ALL=C sort -u + ) + + if [ "${#input_pkgs[@]}" != 0 ]; then + echo "Generating conversion code for ${#input_pkgs[@]} targets" + + kube::codegen::internal::findz \ + "${in_dir}" \ + -type f \ + -name zz_generated.conversion.go \ + | xargs -0 rm -f + + local extra_peer_args=() + for arg in "${extra_peers[@]:+"${extra_peers[@]}"}"; do + extra_peer_args+=("--extra-peer-dirs" "$arg") + done + "${gobin}/conversion-gen" \ + -v "${v}" \ + --output-file zz_generated.conversion.go \ + --go-header-file "${boilerplate}" \ + 
"${extra_peer_args[@]:+"${extra_peer_args[@]}"}" \ + "${input_pkgs[@]}" + fi +} + +# Generate openapi code +# +# USAGE: kube::codegen::gen_openapi [FLAGS] +# +# +# The root directory under which to search for Go files which request openapi +# to be generated. This must be a local path, not a Go package. +# +# See note at the top about package structure below that. +# +# FLAGS: +# +# --output-dir +# The directory into which to emit code. +# +# --output-pkg +# The Go package path (import path) of the --output-dir. +# +# --extra-pkgs +# An optional list of additional packages to be imported during openapi +# generation. The argument must be Go package syntax, e.g. +# "k8s.io/foo/bar". It may be a single value or a comma-delimited list. +# This flag may be repeated. +# +# --report-filename +# An optional path at which to write an API violations report. "-" means +# stdout. +# +# --update-report +# If specified, update the report file in place, rather than diffing it. +# +# --boilerplate +# An optional override for the header file to insert into generated files. +# +function kube::codegen::gen_openapi() { + local in_dir="" + local out_dir="" + local out_pkg="" + local extra_pkgs=() + local report="/dev/null" + local update_report="" + local boilerplate="${KUBE_CODEGEN_ROOT}/hack/boilerplate.go.txt" + local v="${KUBE_VERBOSE:-0}" + + while [ "$#" -gt 0 ]; do + case "$1" in + "--output-dir") + out_dir="$2" + shift 2 + ;; + "--output-pkg") + out_pkg="$2" + shift 2 + ;; + "--extra-pkgs") + extra_pkgs+=("$2") + shift 2 + ;; + "--report-filename") + report="$2" + shift 2 + ;; + "--update-report") + update_report="true" + shift + ;; + "--boilerplate") + boilerplate="$2" + shift 2 + ;; + *) + if [[ "$1" =~ ^-- ]]; then + echo "unknown argument: $1" >&2 + return 1 + fi + if [ -n "$in_dir" ]; then + echo "too many arguments: $1 (already have $in_dir)" >&2 + return 1 + fi + in_dir="$1" + shift + ;; + esac + done + + if [ -z "${in_dir}" ]; then + echo "input-dir argument is required" >&2 + return 1 + fi + if [ -z "${out_dir}" ]; then + echo "--output-dir is required" >&2 + return 1 + fi + if [ -z "${out_pkg}" ]; then + echo "--output-pkg is required" >&2 + return 1 + fi + + local new_report + new_report="$(mktemp -t "$(basename "$0").api_violations.XXXXXX")" + if [ -n "${update_report}" ]; then + new_report="${report}" + fi + + ( + # To support running this from anywhere, first cd into this directory, + # and then install with forced module mode on and fully qualified name. 
+ cd "${KUBE_CODEGEN_ROOT}" + BINS=( + openapi-gen + ) + # shellcheck disable=2046 # printf word-splitting is intentional + GO111MODULE=on go install $(printf "k8s.io/kube-openapi/cmd/%s " "${BINS[@]}") + ) + # Go installs in $GOBIN if defined, and $GOPATH/bin otherwise + gobin="${GOBIN:-$(go env GOPATH)/bin}" + + local input_pkgs=( "${extra_pkgs[@]:+"${extra_pkgs[@]}"}") + while read -r dir; do + pkg="$(cd "${dir}" && GO111MODULE=on go list -find .)" + input_pkgs+=("${pkg}") + done < <( + ( kube::codegen::internal::grep -l --null \ + -e '^\s*//\s*+k8s:openapi-gen=' \ + -r "${in_dir}" \ + --include '*.go' \ + || true \ + ) | while read -r -d $'\0' F; do dirname "${F}"; done \ + | LC_ALL=C sort -u + ) + + if [ "${#input_pkgs[@]}" != 0 ]; then + echo "Generating openapi code for ${#input_pkgs[@]} targets" + + kube::codegen::internal::findz \ + "${in_dir}" \ + -type f \ + -name zz_generated.openapi.go \ + | xargs -0 rm -f + + "${gobin}/openapi-gen" \ + -v "${v}" \ + --output-file zz_generated.openapi.go \ + --go-header-file "${boilerplate}" \ + --output-dir "${out_dir}" \ + --output-pkg "${out_pkg}" \ + --report-filename "${new_report}" \ + "k8s.io/apimachinery/pkg/apis/meta/v1" \ + "k8s.io/apimachinery/pkg/runtime" \ + "k8s.io/apimachinery/pkg/version" \ + "${input_pkgs[@]}" + fi + + touch "${report}" # in case it doesn't exist yet + if ! diff -u "${report}" "${new_report}"; then + echo -e "ERROR:" + echo -e "\tAPI rule check failed for ${report}: new reported violations" + echo -e "\tPlease read api/api-rules/README.md" + return 1 + fi +} + +# Generate client code +# +# USAGE: kube::codegen::gen_client [FLAGS] +# +# +# The root package under which to search for Go files which request clients +# to be generated. This must be a local path, not a Go package. +# +# See note at the top about package structure below that. +# +# FLAGS: +# --one-input-api +# A specific API (a directory) under the input-dir for which to generate a +# client. If this is not set, clients for all APIs under the input-dir +# will be generated (under the --output-pkg). +# +# --output-dir +# The root directory under which to emit code. Each aspect of client +# generation will make one or more subdirectories. +# +# --output-pkg +# The Go package path (import path) of the --output-dir. Each aspect of +# client generation will make one or more sub-packages. +# +# --boilerplate +# An optional override for the header file to insert into generated files. +# +# --clientset-name +# An optional override for the leaf name of the generated "clientset" directory. +# +# --versioned-name +# An optional override for the leaf name of the generated +# "/versioned" directory. +# +# --with-applyconfig +# Enables generation of applyconfiguration files. +# +# --applyconfig-name +# An optional override for the leaf name of the generated "applyconfiguration" directory. +# +# --applyconfig-externals +# An optional list of comma separated external apply configurations locations +# in .: form. +# +# --with-watch +# Enables generation of listers and informers for APIs which support WATCH. +# +# --listers-name +# An optional override for the leaf name of the generated "listers" directory. +# +# --informers-name +# An optional override for the leaf name of the generated "informers" directory. +# +# --plural-exceptions +# An optional list of comma separated plural exception definitions in Type:PluralizedType form. 
+# +function kube::codegen::gen_client() { + local in_dir="" + local one_input_api="" + local out_dir="" + local out_pkg="" + local clientset_subdir="clientset" + local clientset_versioned_name="versioned" + local applyconfig="false" + local applyconfig_subdir="applyconfiguration" + local applyconfig_external="" + local applyconfig_openapi_schema="" + local watchable="false" + local listers_subdir="listers" + local informers_subdir="informers" + local boilerplate="${KUBE_CODEGEN_ROOT}/hack/boilerplate.go.txt" + local plural_exceptions="" + local v="${KUBE_VERBOSE:-0}" + + while [ "$#" -gt 0 ]; do + case "$1" in + "--one-input-api") + one_input_api="/$2" + shift 2 + ;; + "--output-dir") + out_dir="$2" + shift 2 + ;; + "--output-pkg") + out_pkg="$2" + shift 2 + ;; + "--boilerplate") + boilerplate="$2" + shift 2 + ;; + "--clientset-name") + clientset_subdir="$2" + shift 2 + ;; + "--versioned-name") + clientset_versioned_name="$2" + shift 2 + ;; + "--with-applyconfig") + applyconfig="true" + shift + ;; + "--applyconfig-name") + applyconfig_subdir="$2" + shift 2 + ;; + "--applyconfig-externals") + applyconfig_external="$2" + shift 2 + ;; + "--applyconfig-openapi-schema") + applyconfig_openapi_schema="$2" + shift 2 + ;; + "--with-watch") + watchable="true" + shift + ;; + "--listers-name") + listers_subdir="$2" + shift 2 + ;; + "--informers-name") + informers_subdir="$2" + shift 2 + ;; + "--plural-exceptions") + plural_exceptions="$2" + shift 2 + ;; + *) + if [[ "$1" =~ ^-- ]]; then + echo "unknown argument: $1" >&2 + return 1 + fi + if [ -n "$in_dir" ]; then + echo "too many arguments: $1 (already have $in_dir)" >&2 + return 1 + fi + in_dir="$1" + shift + ;; + esac + done + + if [ -z "${in_dir}" ]; then + echo "input-dir argument is required" >&2 + return 1 + fi + if [ -z "${out_dir}" ]; then + echo "--output-dir is required" >&2 + return 1 + fi + if [ -z "${out_pkg}" ]; then + echo "--output-pkg is required" >&2 + fi + + mkdir -p "${out_dir}" + + ( + # To support running this from anywhere, first cd into this directory, + # and then install with forced module mode on and fully qualified name. + cd "${KUBE_CODEGEN_ROOT}" + BINS=( + applyconfiguration-gen + client-gen + informer-gen + lister-gen + ) + # shellcheck disable=2046 # printf word-splitting is intentional + GO111MODULE=on go install $(printf "k8s.io/code-generator/cmd/%s " "${BINS[@]}") + ) + # Go installs in $GOBIN if defined, and $GOPATH/bin otherwise + gobin="${GOBIN:-$(go env GOPATH)/bin}" + + local group_versions=() + local input_pkgs=() + while read -r dir; do + pkg="$(cd "${dir}" && GO111MODULE=on go list -find .)" + leaf="$(basename "${dir}")" + if grep -E -q '^v[0-9]+((alpha|beta)[0-9]+)?$' <<< "${leaf}"; then + input_pkgs+=("${pkg}") + + dir2="$(dirname "${dir}")" + leaf2="$(basename "${dir2}")" + group_versions+=("${leaf2}/${leaf}") + fi + done < <( + ( kube::codegen::internal::grep -l --null \ + -e '^\s*//\s*+genclient' \ + -r "${in_dir}${one_input_api}" \ + --include '*.go' \ + || true \ + ) | while read -r -d $'\0' F; do dirname "${F}"; done \ + | LC_ALL=C sort -u + ) + + if [ "${#group_versions[@]}" == 0 ]; then + return 0 + fi + + applyconfig_pkg="" # set this for later use, iff enabled + if [ "${applyconfig}" == "true" ]; then + applyconfig_pkg="${out_pkg}/${applyconfig_subdir}" + + echo "Generating applyconfig code for ${#input_pkgs[@]} targets" + + ( kube::codegen::internal::grep -l --null \ + -e '^// Code generated by applyconfiguration-gen. 
DO NOT EDIT.$' \ + -r "${out_dir}/${applyconfig_subdir}" \ + --include '*.go' \ + || true \ + ) | xargs -0 rm -f + + "${gobin}/applyconfiguration-gen" \ + -v "${v}" \ + --go-header-file "${boilerplate}" \ + --output-dir "${out_dir}/${applyconfig_subdir}" \ + --output-pkg "${applyconfig_pkg}" \ + --external-applyconfigurations "${applyconfig_external}" \ + --openapi-schema "${applyconfig_openapi_schema}" \ + "${input_pkgs[@]}" + fi + + echo "Generating client code for ${#group_versions[@]} targets" + + ( kube::codegen::internal::grep -l --null \ + -e '^// Code generated by client-gen. DO NOT EDIT.$' \ + -r "${out_dir}/${clientset_subdir}" \ + --include '*.go' \ + || true \ + ) | xargs -0 rm -f + + local inputs=() + for arg in "${group_versions[@]}"; do + inputs+=("--input" "$arg") + done + "${gobin}/client-gen" \ + -v "${v}" \ + --go-header-file "${boilerplate}" \ + --output-dir "${out_dir}/${clientset_subdir}" \ + --output-pkg "${out_pkg}/${clientset_subdir}" \ + --clientset-name "${clientset_versioned_name}" \ + --apply-configuration-package "${applyconfig_pkg}" \ + --input-base "$(cd "${in_dir}" && pwd -P)" `# must be absolute path or Go import path"` \ + --plural-exceptions "${plural_exceptions}" \ + "${inputs[@]}" + + if [ "${watchable}" == "true" ]; then + echo "Generating lister code for ${#input_pkgs[@]} targets" + + ( kube::codegen::internal::grep -l --null \ + -e '^// Code generated by lister-gen. DO NOT EDIT.$' \ + -r "${out_dir}/${listers_subdir}" \ + --include '*.go' \ + || true \ + ) | xargs -0 rm -f + + "${gobin}/lister-gen" \ + -v "${v}" \ + --go-header-file "${boilerplate}" \ + --output-dir "${out_dir}/${listers_subdir}" \ + --output-pkg "${out_pkg}/${listers_subdir}" \ + --plural-exceptions "${plural_exceptions}" \ + "${input_pkgs[@]}" + + echo "Generating informer code for ${#input_pkgs[@]} targets" + + ( kube::codegen::internal::grep -l --null \ + -e '^// Code generated by informer-gen. DO NOT EDIT.$' \ + -r "${out_dir}/${informers_subdir}" \ + --include '*.go' \ + || true \ + ) | xargs -0 rm -f + + "${gobin}/informer-gen" \ + -v "${v}" \ + --go-header-file "${boilerplate}" \ + --output-dir "${out_dir}/${informers_subdir}" \ + --output-pkg "${out_pkg}/${informers_subdir}" \ + --versioned-clientset-package "${out_pkg}/${clientset_subdir}/${clientset_versioned_name}" \ + --listers-package "${out_pkg}/${listers_subdir}" \ + --plural-exceptions "${plural_exceptions}" \ + "${input_pkgs[@]}" + fi +} + +# Generate register code +# +# USAGE: kube::codegen::gen_register [FLAGS] +# +# +# The root directory under which to search for Go files which request code to +# be generated. This must be a local path, not a Go package. +# +# See note at the top about package structure below that. +# +# FLAGS: +# +# --boilerplate +# An optional override for the header file to insert into generated files. 
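For comparison, a sketch of invoking the register generator documented above, under the same placeholder layout as the earlier example (paths are illustrative, not part of this diff):

    kube::codegen::gen_register \
        --boilerplate "${REPO_ROOT}/hack/boilerplate.go.txt" \
        "${REPO_ROOT}/pkg/k8s/apis"

As the function body below shows, packages are selected by the presence of a // +groupName comment tag, and any stale zz_generated.register.go files under the input directory are removed before regeneration.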
+# +function kube::codegen::gen_register() { + local in_dir="" + local boilerplate="${KUBE_CODEGEN_ROOT}/hack/boilerplate.go.txt" + local v="${KUBE_VERBOSE:-0}" + + while [ "$#" -gt 0 ]; do + case "$1" in + "--boilerplate") + boilerplate="$2" + shift 2 + ;; + *) + if [[ "$1" =~ ^-- ]]; then + echo "unknown argument: $1" >&2 + return 1 + fi + if [ -n "$in_dir" ]; then + echo "too many arguments: $1 (already have $in_dir)" >&2 + return 1 + fi + in_dir="$1" + shift + ;; + esac + done + + if [ -z "${in_dir}" ]; then + echo "input-dir argument is required" >&2 + return 1 + fi + + ( + # To support running this from anywhere, first cd into this directory, + # and then install with forced module mode on and fully qualified name. + cd "${KUBE_CODEGEN_ROOT}" + BINS=( + register-gen + ) + # shellcheck disable=2046 # printf word-splitting is intentional + GO111MODULE=on go install $(printf "k8s.io/code-generator/cmd/%s " "${BINS[@]}") + ) + # Go installs in $GOBIN if defined, and $GOPATH/bin otherwise + gobin="${GOBIN:-$(go env GOPATH)/bin}" + + # Register + # + local input_pkgs=() + while read -r dir; do + pkg="$(cd "${dir}" && GO111MODULE=on go list -find .)" + input_pkgs+=("${pkg}") + done < <( + ( kube::codegen::internal::grep -l --null \ + -e '^\s*//\s*+groupName' \ + -r "${in_dir}" \ + --include '*.go' \ + || true \ + ) | while read -r -d $'\0' F; do dirname "${F}"; done \ + | LC_ALL=C sort -u + ) + + if [ "${#input_pkgs[@]}" != 0 ]; then + echo "Generating register code for ${#input_pkgs[@]} targets" + + kube::codegen::internal::findz \ + "${in_dir}" \ + -type f \ + -name zz_generated.register.go \ + | xargs -0 rm -f + + "${gobin}/register-gen" \ + -v "${v}" \ + --output-file zz_generated.register.go \ + --go-header-file "${boilerplate}" \ + "${input_pkgs[@]}" + fi +} diff --git a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go index fd8c3a85..59ee489f 100644 --- a/vendor/k8s.io/code-generator/pkg/namer/tag-override.go +++ b/vendor/k8s.io/code-generator/pkg/namer/tag-override.go @@ -17,8 +17,9 @@ limitations under the License. package namer import ( - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) // TagOverrideNamer is a namer which pulls names from a given tag, if specified, @@ -49,7 +50,7 @@ func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer { // extractTag gets the comment-tags for the key. If the tag did not exist, it // returns the empty string. func extractTag(key string, lines []string) string { - val, present := types.ExtractCommentTags("+", lines)[key] + val, present := gengo.ExtractCommentTags("+", lines)[key] if !present || len(val) < 1 { return "" } diff --git a/vendor/k8s.io/code-generator/pkg/util/build.go b/vendor/k8s.io/code-generator/pkg/util/build.go deleted file mode 100644 index 53f93afe..00000000 --- a/vendor/k8s.io/code-generator/pkg/util/build.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - gobuild "go/build" - "path/filepath" - "strings" -) - -// CurrentPackage returns the go package of the current directory, or "" if it cannot -// be derived from the GOPATH. -func CurrentPackage() string { - for _, root := range gobuild.Default.SrcDirs() { - if pkg, ok := hasSubdir(root, "."); ok { - return pkg - } - } - return "" -} - -func hasSubdir(root, dir string) (rel string, ok bool) { - // ensure a tailing separator to properly compare on word-boundaries - const sep = string(filepath.Separator) - root = filepath.Clean(root) - if !strings.HasSuffix(root, sep) { - root += sep - } - - // check whether root dir starts with root - dir = filepath.Clean(dir) - if !strings.HasPrefix(dir, root) { - return "", false - } - - // cut off root - return filepath.ToSlash(dir[len(root):]), true -} - -// Vendorless trims vendor prefix from a package path to make it canonical -func Vendorless(p string) string { - if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { - return p[pos+len("/vendor/"):] - } - return p -} diff --git a/vendor/k8s.io/code-generator/tools.go b/vendor/k8s.io/code-generator/tools.go index fb797be0..4080b2c5 100644 --- a/vendor/k8s.io/code-generator/tools.go +++ b/vendor/k8s.io/code-generator/tools.go @@ -22,15 +22,14 @@ limitations under the License. package codegenerator import ( + _ "k8s.io/code-generator/cmd/applyconfiguration-gen" _ "k8s.io/code-generator/cmd/client-gen" _ "k8s.io/code-generator/cmd/conversion-gen" _ "k8s.io/code-generator/cmd/deepcopy-gen" _ "k8s.io/code-generator/cmd/defaulter-gen" _ "k8s.io/code-generator/cmd/go-to-protobuf" - _ "k8s.io/code-generator/cmd/import-boss" _ "k8s.io/code-generator/cmd/informer-gen" _ "k8s.io/code-generator/cmd/lister-gen" - _ "k8s.io/code-generator/cmd/openapi-gen" _ "k8s.io/code-generator/cmd/register-gen" - _ "k8s.io/code-generator/cmd/set-gen" + _ "k8s.io/kube-openapi/cmd/openapi-gen" ) diff --git a/vendor/k8s.io/gengo/args/args.go b/vendor/k8s.io/gengo/args/args.go deleted file mode 100644 index 93d863f0..00000000 --- a/vendor/k8s.io/gengo/args/args.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package args has common command-line flags for generation programs. -package args - -import ( - "bytes" - goflag "flag" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "time" - - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/parser" - "k8s.io/gengo/types" - - "github.com/spf13/pflag" -) - -// Default returns a defaulted GeneratorArgs. You may change the defaults -// before calling AddFlags. 
-func Default() *GeneratorArgs { - return &GeneratorArgs{ - OutputBase: DefaultSourceTree(), - GoHeaderFilePath: filepath.Join(DefaultSourceTree(), "k8s.io/gengo/boilerplate/boilerplate.go.txt"), - GeneratedBuildTag: "ignore_autogenerated", - GeneratedByCommentTemplate: "// Code generated by GENERATOR_NAME. DO NOT EDIT.", - defaultCommandLineFlags: true, - } -} - -// GeneratorArgs has arguments that are passed to generators. -type GeneratorArgs struct { - // Which directories to parse. - InputDirs []string - - // Source tree to write results to. - OutputBase string - - // Package path within the source tree. - OutputPackagePath string - - // Output file name. - OutputFileBaseName string - - // Where to get copyright header text. - GoHeaderFilePath string - - // If GeneratedByCommentTemplate is set, generate a "Code generated by" comment - // below the bloilerplate, of the format defined by this string. - // Any instances of "GENERATOR_NAME" will be replaced with the name of the code generator. - GeneratedByCommentTemplate string - - // If true, only verify, don't write anything. - VerifyOnly bool - - // If true, include *_test.go files - IncludeTestFiles bool - - // GeneratedBuildTag is the tag used to identify code generated by execution - // of this type. Each generator should use a different tag, and different - // groups of generators (external API that depends on Kube generations) should - // keep tags distinct as well. - GeneratedBuildTag string - - // Any custom arguments go here - CustomArgs interface{} - - // If specified, trim the prefix from OutputPackagePath before writing files. - TrimPathPrefix string - - // Whether to use default command line flags - defaultCommandLineFlags bool -} - -// WithoutDefaultFlagParsing disables implicit addition of command line flags and parsing. -func (g *GeneratorArgs) WithoutDefaultFlagParsing() *GeneratorArgs { - g.defaultCommandLineFlags = false - return g -} - -func (g *GeneratorArgs) AddFlags(fs *pflag.FlagSet) { - fs.StringSliceVarP(&g.InputDirs, "input-dirs", "i", g.InputDirs, "Comma-separated list of import paths to get input types from.") - fs.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/ or ./ if $GOPATH is not set.") - fs.StringVarP(&g.OutputPackagePath, "output-package", "p", g.OutputPackagePath, "Base package path.") - fs.StringVarP(&g.OutputFileBaseName, "output-file-base", "O", g.OutputFileBaseName, "Base name (without .go suffix) for output files.") - fs.StringVarP(&g.GoHeaderFilePath, "go-header-file", "h", g.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.") - fs.BoolVar(&g.VerifyOnly, "verify-only", g.VerifyOnly, "If true, only verify existing output, do not write anything.") - fs.StringVar(&g.GeneratedBuildTag, "build-tag", g.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.") - fs.StringVar(&g.TrimPathPrefix, "trim-path-prefix", g.TrimPathPrefix, "If set, trim the specified prefix from --output-package when generating files.") -} - -// LoadGoBoilerplate loads the boilerplate file passed to --go-header-file. 
-func (g *GeneratorArgs) LoadGoBoilerplate() ([]byte, error) { - b, err := ioutil.ReadFile(g.GoHeaderFilePath) - if err != nil { - return nil, err - } - b = bytes.Replace(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().UTC().Year())), -1) - - if g.GeneratedByCommentTemplate != "" { - if len(b) != 0 { - b = append(b, byte('\n')) - } - generatorName := path.Base(os.Args[0]) - generatedByComment := strings.Replace(g.GeneratedByCommentTemplate, "GENERATOR_NAME", generatorName, -1) - s := fmt.Sprintf("%s\n\n", generatedByComment) - b = append(b, []byte(s)...) - } - return b, nil -} - -// NewBuilder makes a new parser.Builder and populates it with the input -// directories. -func (g *GeneratorArgs) NewBuilder() (*parser.Builder, error) { - b := parser.New() - - // flag for including *_test.go - b.IncludeTestFiles = g.IncludeTestFiles - - // Ignore all auto-generated files. - b.AddBuildTags(g.GeneratedBuildTag) - - for _, d := range g.InputDirs { - var err error - if strings.HasSuffix(d, "/...") { - err = b.AddDirRecursive(strings.TrimSuffix(d, "/...")) - } else { - err = b.AddDir(d) - } - if err != nil { - return nil, fmt.Errorf("unable to add directory %q: %v", d, err) - } - } - return b, nil -} - -// InputIncludes returns true if the given package is a (sub) package of one of -// the InputDirs. -func (g *GeneratorArgs) InputIncludes(p *types.Package) bool { - for _, dir := range g.InputDirs { - d := dir - if strings.HasSuffix(d, "...") { - d = strings.TrimSuffix(d, "...") - } - if strings.HasPrefix(d, "./vendor/") { - d = strings.TrimPrefix(d, "./vendor/") - } - if strings.HasPrefix(p.Path, d) { - return true - } - } - return false -} - -// DefaultSourceTree returns the /src directory of the first entry in $GOPATH. -// If $GOPATH is empty, it returns "./". Useful as a default output location. -func DefaultSourceTree() string { - paths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator)) - if len(paths) > 0 && len(paths[0]) > 0 { - return filepath.Join(paths[0], "src") - } - return "./" -} - -// Execute implements main(). -// If you don't need any non-default behavior, use as: -// args.Default().Execute(...) -func (g *GeneratorArgs) Execute(nameSystems namer.NameSystems, defaultSystem string, pkgs func(*generator.Context, *GeneratorArgs) generator.Packages) error { - if g.defaultCommandLineFlags { - g.AddFlags(pflag.CommandLine) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - pflag.Parse() - } - - b, err := g.NewBuilder() - if err != nil { - return fmt.Errorf("Failed making a parser: %v", err) - } - - // pass through the flag on whether to include *_test.go files - b.IncludeTestFiles = g.IncludeTestFiles - - c, err := generator.NewContext(b, nameSystems, defaultSystem) - if err != nil { - return fmt.Errorf("Failed making a context: %v", err) - } - - c.TrimPathPrefix = g.TrimPathPrefix - - c.Verify = g.VerifyOnly - packages := pkgs(c, g) - if err := c.ExecutePackages(g.OutputBase, packages); err != nil { - return fmt.Errorf("Failed executing generator: %v", err) - } - - return nil -} diff --git a/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go b/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go deleted file mode 100644 index 7232f5e6..00000000 --- a/vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go +++ /dev/null @@ -1,419 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package generators has the generators for the import-boss utility. -package generators - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "sort" - "strings" - - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - "sigs.k8s.io/yaml" - - "k8s.io/klog/v2" -) - -const ( - goModFile = "go.mod" - importBossFileType = "import-boss" -) - -// NameSystems returns the name system used by the generators in this package. -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "raw": namer.NewRawNamer("", nil), - } -} - -// DefaultNameSystem returns the default name system for ordering the types to be -// processed by the generators in this package. -func DefaultNameSystem() string { - return "raw" -} - -// Packages makes the import-boss package definition. -func Packages(c *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - pkgs := generator.Packages{} - c.FileTypes = map[string]generator.FileType{ - importBossFileType: importRuleFile{c}, - } - - for _, p := range c.Universe { - if !arguments.InputIncludes(p) { - // Don't run on e.g. third party dependencies. - continue - } - savedPackage := p - pkgs = append(pkgs, &generator.DefaultPackage{ - PackageName: p.Name, - PackagePath: p.Path, - Source: p.SourcePath, - // GeneratorFunc returns a list of generators. Each generator makes a - // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{&importRules{ - myPackage: savedPackage, - }} - }, - FilterFunc: func(c *generator.Context, t *types.Type) bool { - return false - }, - }) - } - - return pkgs -} - -// A single import restriction rule. -type Rule struct { - // All import paths that match this regexp... - SelectorRegexp string - // ... must have one of these prefixes ... - AllowedPrefixes []string - // ... and must not have one of these prefixes. - ForbiddenPrefixes []string -} - -type InverseRule struct { - Rule - // True if the rule is to be applied to transitive imports. - Transitive bool -} - -type fileFormat struct { - CurrentImports []string - - Rules []Rule - InverseRules []InverseRule - - path string -} - -func readFile(path string) (*fileFormat, error) { - currentBytes, err := ioutil.ReadFile(path) - if err != nil { - return nil, fmt.Errorf("couldn't read %v: %v", path, err) - } - - var current fileFormat - err = yaml.Unmarshal(currentBytes, ¤t) - if err != nil { - return nil, fmt.Errorf("couldn't unmarshal %v: %v", path, err) - } - current.path = path - return ¤t, nil -} - -func writeFile(path string, ff *fileFormat) error { - raw, err := json.MarshalIndent(ff, "", "\t") - if err != nil { - return fmt.Errorf("couldn't format data for file %v.\n%#v", path, ff) - } - f, err := os.Create(path) - if err != nil { - return fmt.Errorf("couldn't open %v for writing: %v", path, err) - } - defer f.Close() - _, err = f.Write(raw) - return err -} - -// This does the actual checking, since it knows the literal destination file. 
-type importRuleFile struct { - context *generator.Context -} - -func (irf importRuleFile) AssembleFile(f *generator.File, path string) error { - return irf.VerifyFile(f, path) -} - -// TODO: make a flag to enable this, or expose this information in some other way. -func (importRuleFile) listEntireImportTree(f *generator.File, path string) error { - // If the file exists, populate its current imports. This is mostly to help - // humans figure out what they need to fix. - if _, err := os.Stat(path); err != nil { - // Ignore packages which haven't opted in by adding an .import-restrictions file. - return nil - } - - current, err := readFile(path) - if err != nil { - return err - } - - current.CurrentImports = []string{} - for v := range f.Imports { - current.CurrentImports = append(current.CurrentImports, v) - } - sort.Strings(current.CurrentImports) - - return writeFile(path, current) -} - -// removeLastDir removes the last directory, but leaves the file name -// unchanged. It returns the new path and the removed directory. So: -// "a/b/c/file" -> ("a/b/file", "c") -func removeLastDir(path string) (newPath, removedDir string) { - dir, file := filepath.Split(path) - dir = strings.TrimSuffix(dir, string(filepath.Separator)) - return filepath.Join(filepath.Dir(dir), file), filepath.Base(dir) -} - -// isGoModRoot checks if a directory is the root directory for a package -// by checking for the existence of a 'go.mod' file in that directory. -func isGoModRoot(path string) bool { - _, err := os.Stat(filepath.Join(filepath.Dir(path), goModFile)) - return err == nil -} - -// recursiveRead collects all '.import-restriction' files, between the current directory, -// and the package root when Go modules are enabled, or $GOPATH/src when they are not. -func recursiveRead(path string) ([]*fileFormat, error) { - restrictionFiles := make([]*fileFormat, 0) - - for { - if _, err := os.Stat(path); err == nil { - rules, err := readFile(path) - if err != nil { - return nil, err - } - - restrictionFiles = append(restrictionFiles, rules) - } - - nextPath, removedDir := removeLastDir(path) - if nextPath == path || isGoModRoot(path) || removedDir == "src" { - break - } - - path = nextPath - } - - return restrictionFiles, nil -} - -func (irf importRuleFile) VerifyFile(f *generator.File, path string) error { - restrictionFiles, err := recursiveRead(filepath.Join(f.PackageSourcePath, f.Name)) - if err != nil { - return fmt.Errorf("error finding rules file: %v", err) - } - - if err := irf.verifyRules(restrictionFiles, f); err != nil { - return err - } - - return irf.verifyInverseRules(restrictionFiles, f) -} - -func (irf importRuleFile) verifyRules(restrictionFiles []*fileFormat, f *generator.File) error { - selectors := make([][]*regexp.Regexp, len(restrictionFiles)) - for i, restrictionFile := range restrictionFiles { - for _, r := range restrictionFile.Rules { - re, err := regexp.Compile(r.SelectorRegexp) - if err != nil { - return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, restrictionFile.path, err) - } - - selectors[i] = append(selectors[i], re) - } - } - - forbiddenImports := map[string]string{} - allowedMismatchedImports := []string{} - - for v := range f.Imports { - explicitlyAllowed := false - - NextRestrictionFiles: - for i, rules := range restrictionFiles { - for j, r := range rules.Rules { - matching := selectors[i][j].MatchString(v) - klog.V(5).Infof("Checking %v matches %v: %v\n", r.SelectorRegexp, v, matching) - if !matching { - continue - } - for _, forbidden := range 
r.ForbiddenPrefixes { - klog.V(4).Infof("Checking %v against %v\n", v, forbidden) - if strings.HasPrefix(v, forbidden) { - forbiddenImports[v] = forbidden - } - } - for _, allowed := range r.AllowedPrefixes { - klog.V(4).Infof("Checking %v against %v\n", v, allowed) - if strings.HasPrefix(v, allowed) { - explicitlyAllowed = true - break - } - } - - if !explicitlyAllowed { - allowedMismatchedImports = append(allowedMismatchedImports, v) - } else { - klog.V(2).Infof("%v importing %v allowed by %v\n", f.PackagePath, v, restrictionFiles[i].path) - break NextRestrictionFiles - } - } - } - } - - if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 { - var errorBuilder strings.Builder - for i, f := range forbiddenImports { - fmt.Fprintf(&errorBuilder, "import %v has forbidden prefix %v\n", i, f) - } - if len(allowedMismatchedImports) > 0 { - sort.Sort(sort.StringSlice(allowedMismatchedImports)) - fmt.Fprintf(&errorBuilder, "the following imports did not match any allowed prefix:\n") - for _, i := range allowedMismatchedImports { - fmt.Fprintf(&errorBuilder, " %v\n", i) - } - } - return errors.New(errorBuilder.String()) - } - - return nil -} - -// verifyInverseRules checks that all packages that import a package are allowed to import it. -func (irf importRuleFile) verifyInverseRules(restrictionFiles []*fileFormat, f *generator.File) error { - // compile all Selector regex in all restriction files - selectors := make([][]*regexp.Regexp, len(restrictionFiles)) - for i, restrictionFile := range restrictionFiles { - for _, r := range restrictionFile.InverseRules { - re, err := regexp.Compile(r.SelectorRegexp) - if err != nil { - return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, restrictionFile.path, err) - } - - selectors[i] = append(selectors[i], re) - } - } - - directImport := map[string]bool{} - for _, imp := range irf.context.IncomingImports()[f.PackagePath] { - directImport[imp] = true - } - - forbiddenImports := map[string]string{} - allowedMismatchedImports := []string{} - - for _, v := range irf.context.TransitiveIncomingImports()[f.PackagePath] { - explicitlyAllowed := false - - NextRestrictionFiles: - for i, rules := range restrictionFiles { - for j, r := range rules.InverseRules { - if !r.Transitive && !directImport[v] { - continue - } - - re := selectors[i][j] - matching := re.MatchString(v) - klog.V(4).Infof("Checking %v matches %v (importing %v: %v\n", r.SelectorRegexp, v, f.PackagePath, matching) - if !matching { - continue - } - for _, forbidden := range r.ForbiddenPrefixes { - klog.V(4).Infof("Checking %v against %v\n", v, forbidden) - if strings.HasPrefix(v, forbidden) { - forbiddenImports[v] = forbidden - } - } - for _, allowed := range r.AllowedPrefixes { - klog.V(4).Infof("Checking %v against %v\n", v, allowed) - if strings.HasPrefix(v, allowed) { - explicitlyAllowed = true - break - } - } - if !explicitlyAllowed { - allowedMismatchedImports = append(allowedMismatchedImports, v) - } else { - klog.V(2).Infof("%v importing %v allowed by %v\n", v, f.PackagePath, restrictionFiles[i].path) - break NextRestrictionFiles - } - } - } - } - - if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 { - var errorBuilder strings.Builder - for i, f := range forbiddenImports { - fmt.Fprintf(&errorBuilder, "(inverse): import %v has forbidden prefix %v\n", i, f) - } - if len(allowedMismatchedImports) > 0 { - sort.Sort(sort.StringSlice(allowedMismatchedImports)) - fmt.Fprintf(&errorBuilder, "(inverse): the following imports did not match any 
allowed prefix:\n") - for _, i := range allowedMismatchedImports { - fmt.Fprintf(&errorBuilder, " %v\n", i) - } - } - return errors.New(errorBuilder.String()) - } - - return nil -} - -// importRules produces a file with a set for a single type. -type importRules struct { - myPackage *types.Package - imports namer.ImportTracker -} - -var ( - _ = generator.Generator(&importRules{}) - _ = generator.FileType(importRuleFile{}) -) - -func (r *importRules) Name() string { return "import rules" } -func (r *importRules) Filter(*generator.Context, *types.Type) bool { return false } -func (r *importRules) Namers(*generator.Context) namer.NameSystems { return nil } -func (r *importRules) PackageVars(*generator.Context) []string { return []string{} } -func (r *importRules) PackageConsts(*generator.Context) []string { return []string{} } -func (r *importRules) GenerateType(*generator.Context, *types.Type, io.Writer) error { return nil } -func (r *importRules) Filename() string { return ".import-restrictions" } -func (r *importRules) FileType() string { return importBossFileType } -func (r *importRules) Init(c *generator.Context, w io.Writer) error { return nil } -func (r *importRules) Finalize(*generator.Context, io.Writer) error { return nil } - -func dfsImports(dest *[]string, seen map[string]bool, p *types.Package) { - for _, p2 := range p.Imports { - if seen[p2.Path] { - continue - } - seen[p2.Path] = true - dfsImports(dest, seen, p2) - *dest = append(*dest, p2.Path) - } -} - -func (r *importRules) Imports(*generator.Context) []string { - all := []string{} - dfsImports(&all, map[string]bool{}, r.myPackage) - return all -} diff --git a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go b/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go deleted file mode 100644 index e89f5ad7..00000000 --- a/vendor/k8s.io/gengo/examples/set-gen/generators/sets.go +++ /dev/null @@ -1,378 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package generators has the generators for the set-gen utility. -package generators - -import ( - "io" - - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - - "k8s.io/klog/v2" -) - -// NameSystems returns the name system used by the generators in this package. -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "public": namer.NewPublicNamer(0), - "private": namer.NewPrivateNamer(0), - "raw": namer.NewRawNamer("", nil), - } -} - -// DefaultNameSystem returns the default name system for ordering the types to be -// processed by the generators in this package. -func DefaultNameSystem() string { - return "public" -} - -// Packages makes the sets package definition. 
-func Packages(_ *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() - if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) - } - - return generator.Packages{&generator.DefaultPackage{ - PackageName: "sets", - PackagePath: arguments.OutputPackagePath, - HeaderText: boilerplate, - PackageDocumentation: []byte( - `// Package sets has auto-generated set types. -`), - // GeneratorFunc returns a list of generators. Each generator makes a - // single file. - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { - generators = []generator.Generator{ - // Always generate a "doc.go" file. - generator.DefaultGen{OptionalName: "doc"}, - // Make a separate file for the Empty type, since it's shared by every type. - generator.DefaultGen{ - OptionalName: "empty", - OptionalBody: []byte(emptyTypeDecl), - }, - } - // Since we want a file per type that we generate a set for, we - // have to provide a function for this. - for _, t := range c.Order { - generators = append(generators, &genSet{ - DefaultGen: generator.DefaultGen{ - // Use the privatized version of the - // type name as the file name. - // - // TODO: make a namer that converts - // camelCase to '-' separation for file - // names? - OptionalName: c.Namers["private"].Name(t), - }, - outputPackage: arguments.OutputPackagePath, - typeToMatch: t, - imports: generator.NewImportTracker(), - }) - } - return generators - }, - FilterFunc: func(c *generator.Context, t *types.Type) bool { - // It would be reasonable to filter by the type's package here. - // It might be necessary if your input directory has a big - // import graph. - switch t.Kind { - case types.Map, types.Slice, types.Pointer: - // These types can't be keys in a map. - return false - case types.Builtin: - return true - case types.Struct: - // Only some structs can be keys in a map. This is triggered by the line - // // +genset - // or - // // +genset=true - return extractBoolTagOrDie("genset", t.CommentLines) == true - } - return false - }, - }} -} - -// genSet produces a file with a set for a single type. -type genSet struct { - generator.DefaultGen - outputPackage string - typeToMatch *types.Type - imports namer.ImportTracker -} - -// Filter ignores all but one type because we're making a single file per type. -func (g *genSet) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch } - -func (g *genSet) Namers(c *generator.Context) namer.NameSystems { - return namer.NameSystems{ - "raw": namer.NewRawNamer(g.outputPackage, g.imports), - } -} - -func (g *genSet) Imports(c *generator.Context) (imports []string) { - return append(g.imports.ImportLines(), "reflect", "sort") -} - -// args constructs arguments for templates. Usage: -// g.args(t, "key1", value1, "key2", value2, ...) -// -// 't' is loaded with the key 'type'. -// -// We could use t directly as the argument, but doing it this way makes it easy -// to mix in additional parameters. This feature is not used in this set -// generator, but is present as an example. -func (g *genSet) args(t *types.Type, kv ...interface{}) interface{} { - m := map[interface{}]interface{}{"type": t} - for i := 0; i < len(kv)/2; i++ { - m[kv[i*2]] = kv[i*2+1] - } - return m -} - -// GenerateType makes the body of a file implementing a set for type t. 
-func (g *genSet) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - sw := generator.NewSnippetWriter(w, c, "$", "$") - sw.Do(setCode, g.args(t)) - sw.Do("func less$.type|public$(lhs, rhs $.type|raw$) bool {\n", g.args(t)) - g.lessBody(sw, t) - sw.Do("}\n", g.args(t)) - return sw.Error() -} - -func (g *genSet) lessBody(sw *generator.SnippetWriter, t *types.Type) { - // TODO: make this recursive, handle pointers and multiple nested structs... - switch t.Kind { - case types.Struct: - for _, m := range types.FlattenMembers(t.Members) { - sw.Do("if lhs.$.Name$ < rhs.$.Name$ { return true }\n", m) - sw.Do("if lhs.$.Name$ > rhs.$.Name$ { return false }\n", m) - } - sw.Do("return false\n", nil) - default: - sw.Do("return lhs < rhs\n", nil) - } -} - -// written to the "empty.go" file. -var emptyTypeDecl = ` -// Empty is public since it is used by some internal API objects for conversions between external -// string arrays and internal sets, and conversion logic requires public types today. -type Empty struct{} -` - -// Written for every type. If you've never used text/template before: -// $.type$ refers to the source type; |public means to -// call the function giving the public name, |raw the raw type name. -var setCode = `// sets.$.type|public$ is a set of $.type|raw$s, implemented via map[$.type|raw$]struct{} for minimal memory consumption. -type $.type|public$ map[$.type|raw$]Empty - -// New$.type|public$ creates a $.type|public$ from a list of values. -func New$.type|public$(items ...$.type|raw$) $.type|public$ { - ss := make($.type|public$, len(items)) - ss.Insert(items...) - return ss -} - -// $.type|public$KeySet creates a $.type|public$ from a keys of a map[$.type|raw$](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func $.type|public$KeySet(theMap interface{}) $.type|public$ { - v := reflect.ValueOf(theMap) - ret := $.type|public${} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().($.type|raw$)) - } - return ret -} - -// Insert adds items to the set. -func (s $.type|public$) Insert(items ...$.type|raw$) $.type|public$ { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s $.type|public$) Delete(items ...$.type|raw$) $.type|public$ { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s $.type|public$) Has(item $.type|raw$) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s $.type|public$) HasAll(items ...$.type|raw$) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s $.type|public$) HasAny(items ...$.type|raw$) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Clone returns a new set which is a copy of the current set. -func (s $.type|public$) Clone() $.type|public$ { - result := make($.type|public$, len(s)) - for key := range s { - result.Insert(key) - } - return result -} - -// Difference returns a set of objects that are not in s2. 
-// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s1 $.type|public$) Difference(s2 $.type|public$) $.type|public$ { - result := New$.type|public$() - for key := range s1 { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.SymmetricDifference(s2) = {a3, a4, a5} -// s2.SymmetricDifference(s1) = {a3, a4, a5} -func (s1 $.type|public$) SymmetricDifference(s2 $.type|public$) $.type|public$ { - return s1.Difference(s2).Union(s2.Difference(s1)) -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 $.type|public$) Union(s2 $.type|public$) $.type|public$ { - result := s1.Clone() - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 $.type|public$) Intersection(s2 $.type|public$) $.type|public$ { - var walk, other $.type|public$ - result := New$.type|public$() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 $.type|public$) IsSuperset(s2 $.type|public$) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 $.type|public$) Equal(s2 $.type|public$) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOf$.type|public$ []$.type|raw$ - -func (s sortableSliceOf$.type|public$) Len() int { return len(s) } -func (s sortableSliceOf$.type|public$) Less(i, j int) bool { return less$.type|public$(s[i], s[j]) } -func (s sortableSliceOf$.type|public$) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted $.type|raw$ slice. -func (s $.type|public$) List() []$.type|raw$ { - res := make(sortableSliceOf$.type|public$, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []$.type|raw$(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s $.type|public$) UnsortedList() []$.type|raw$ { - res :=make([]$.type|raw$, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s $.type|public$) PopAny() ($.type|raw$, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue $.type|raw$ - return zeroValue, false -} - -// Len returns the size of the set. -func (s $.type|public$) Len() int { - return len(s) -} - -` diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go b/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go deleted file mode 100644 index e9660c2f..00000000 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/byte.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. -type Byte map[byte]Empty - -// NewByte creates a Byte from a list of values. -func NewByte(items ...byte) Byte { - ss := make(Byte, len(items)) - ss.Insert(items...) - return ss -} - -// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func ByteKeySet(theMap interface{}) Byte { - v := reflect.ValueOf(theMap) - ret := Byte{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(byte)) - } - return ret -} - -// Insert adds items to the set. -func (s Byte) Insert(items ...byte) Byte { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s Byte) Delete(items ...byte) Byte { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Byte) Has(item byte) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Byte) HasAll(items ...byte) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s Byte) HasAny(items ...byte) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Clone returns a new set which is a copy of the current set. -func (s Byte) Clone() Byte { - result := make(Byte, len(s)) - for key := range s { - result.Insert(key) - } - return result -} - -// Difference returns a set of objects that are not in s2. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s1 Byte) Difference(s2 Byte) Byte { - result := NewByte() - for key := range s1 { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.SymmetricDifference(s2) = {a3, a4, a5} -// s2.SymmetricDifference(s1) = {a3, a4, a5} -func (s1 Byte) SymmetricDifference(s2 Byte) Byte { - return s1.Difference(s2).Union(s2.Difference(s1)) -} - -// Union returns a new set which includes items in either s1 or s2. 
-// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Byte) Union(s2 Byte) Byte { - result := s1.Clone() - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Byte) Intersection(s2 Byte) Byte { - var walk, other Byte - result := NewByte() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 Byte) IsSuperset(s2 Byte) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 Byte) Equal(s2 Byte) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfByte []byte - -func (s sortableSliceOfByte) Len() int { return len(s) } -func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } -func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted byte slice. -func (s Byte) List() []byte { - res := make(sortableSliceOfByte, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []byte(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Byte) UnsortedList() []byte { - res := make([]byte, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Byte) PopAny() (byte, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue byte - return zeroValue, false -} - -// Len returns the size of the set. -func (s Byte) Len() int { - return len(s) -} - -func lessByte(lhs, rhs byte) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int.go deleted file mode 100644 index f614f06e..00000000 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/int.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. -type Int map[int]Empty - -// NewInt creates a Int from a list of values. -func NewInt(items ...int) Int { - ss := make(Int, len(items)) - ss.Insert(items...) - return ss -} - -// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). -// If the value passed in is not actually a map, this will panic. 
-func IntKeySet(theMap interface{}) Int { - v := reflect.ValueOf(theMap) - ret := Int{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int)) - } - return ret -} - -// Insert adds items to the set. -func (s Int) Insert(items ...int) Int { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s Int) Delete(items ...int) Int { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Int) Has(item int) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Int) HasAll(items ...int) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s Int) HasAny(items ...int) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Clone returns a new set which is a copy of the current set. -func (s Int) Clone() Int { - result := make(Int, len(s)) - for key := range s { - result.Insert(key) - } - return result -} - -// Difference returns a set of objects that are not in s2. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s1 Int) Difference(s2 Int) Int { - result := NewInt() - for key := range s1 { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.SymmetricDifference(s2) = {a3, a4, a5} -// s2.SymmetricDifference(s1) = {a3, a4, a5} -func (s1 Int) SymmetricDifference(s2 Int) Int { - return s1.Difference(s2).Union(s2.Difference(s1)) -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Int) Union(s2 Int) Int { - result := s1.Clone() - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Int) Intersection(s2 Int) Int { - var walk, other Int - result := NewInt() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 Int) IsSuperset(s2 Int) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 Int) Equal(s2 Int) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfInt []int - -func (s sortableSliceOfInt) Len() int { return len(s) } -func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } -func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted int slice. 
-func (s Int) List() []int { - res := make(sortableSliceOfInt, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Int) UnsortedList() []int { - res := make([]int, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Int) PopAny() (int, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int - return zeroValue, false -} - -// Len returns the size of the set. -func (s Int) Len() int { - return len(s) -} - -func lessInt(lhs, rhs int) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go b/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go deleted file mode 100644 index 995d99bd..00000000 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/int64.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. -type Int64 map[int64]Empty - -// NewInt64 creates a Int64 from a list of values. -func NewInt64(items ...int64) Int64 { - ss := make(Int64, len(items)) - ss.Insert(items...) - return ss -} - -// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func Int64KeySet(theMap interface{}) Int64 { - v := reflect.ValueOf(theMap) - ret := Int64{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int64)) - } - return ret -} - -// Insert adds items to the set. -func (s Int64) Insert(items ...int64) Int64 { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s Int64) Delete(items ...int64) Int64 { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Int64) Has(item int64) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Int64) HasAll(items ...int64) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s Int64) HasAny(items ...int64) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Clone returns a new set which is a copy of the current set. -func (s Int64) Clone() Int64 { - result := make(Int64, len(s)) - for key := range s { - result.Insert(key) - } - return result -} - -// Difference returns a set of objects that are not in s2. 
-// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s1 Int64) Difference(s2 Int64) Int64 { - result := NewInt64() - for key := range s1 { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.SymmetricDifference(s2) = {a3, a4, a5} -// s2.SymmetricDifference(s1) = {a3, a4, a5} -func (s1 Int64) SymmetricDifference(s2 Int64) Int64 { - return s1.Difference(s2).Union(s2.Difference(s1)) -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Int64) Union(s2 Int64) Int64 { - result := s1.Clone() - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Int64) Intersection(s2 Int64) Int64 { - var walk, other Int64 - result := NewInt64() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 Int64) IsSuperset(s2 Int64) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 Int64) Equal(s2 Int64) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfInt64 []int64 - -func (s sortableSliceOfInt64) Len() int { return len(s) } -func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } -func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted int64 slice. -func (s Int64) List() []int64 { - res := make(sortableSliceOfInt64, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int64(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Int64) UnsortedList() []int64 { - res := make([]int64, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Int64) PopAny() (int64, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int64 - return zeroValue, false -} - -// Len returns the size of the set. -func (s Int64) Len() int { - return len(s) -} - -func lessInt64(lhs, rhs int64) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/string.go b/vendor/k8s.io/gengo/examples/set-gen/sets/string.go deleted file mode 100644 index 4a4a92fd..00000000 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/string.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. -type String map[string]Empty - -// NewString creates a String from a list of values. -func NewString(items ...string) String { - ss := make(String, len(items)) - ss.Insert(items...) - return ss -} - -// StringKeySet creates a String from a keys of a map[string](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func StringKeySet(theMap interface{}) String { - v := reflect.ValueOf(theMap) - ret := String{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(string)) - } - return ret -} - -// Insert adds items to the set. -func (s String) Insert(items ...string) String { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s String) Delete(items ...string) String { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s String) Has(item string) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s String) HasAll(items ...string) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s String) HasAny(items ...string) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Clone returns a new set which is a copy of the current set. -func (s String) Clone() String { - result := make(String, len(s)) - for key := range s { - result.Insert(key) - } - return result -} - -// Difference returns a set of objects that are not in s2. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s1 String) Difference(s2 String) String { - result := NewString() - for key := range s1 { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.SymmetricDifference(s2) = {a3, a4, a5} -// s2.SymmetricDifference(s1) = {a3, a4, a5} -func (s1 String) SymmetricDifference(s2 String) String { - return s1.Difference(s2).Union(s2.Difference(s1)) -} - -// Union returns a new set which includes items in either s1 or s2. 
-// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 String) Union(s2 String) String { - result := s1.Clone() - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 String) Intersection(s2 String) String { - var walk, other String - result := NewString() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 String) IsSuperset(s2 String) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 String) Equal(s2 String) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfString []string - -func (s sortableSliceOfString) Len() int { return len(s) } -func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } -func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted string slice. -func (s String) List() []string { - res := make(sortableSliceOfString, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []string(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s String) UnsortedList() []string { - res := make([]string, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s String) PopAny() (string, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue string - return zeroValue, false -} - -// Len returns the size of the set. -func (s String) Len() int { - return len(s) -} - -func lessString(lhs, rhs string) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/gengo/generator/default_generator.go b/vendor/k8s.io/gengo/generator/default_generator.go deleted file mode 100644 index f9476682..00000000 --- a/vendor/k8s.io/gengo/generator/default_generator.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generator - -import ( - "io" - - "k8s.io/gengo/namer" - "k8s.io/gengo/types" -) - -const ( - GolangFileType = "golang" -) - -// DefaultGen implements a do-nothing Generator. -// -// It can be used to implement static content files. -type DefaultGen struct { - // OptionalName, if present, will be used for the generator's name, and - // the filename (with ".go" appended). 
- OptionalName string - - // OptionalBody, if present, will be used as the return from the "Init" - // method. This causes it to be static content for the entire file if - // no other generator touches the file. - OptionalBody []byte -} - -func (d DefaultGen) Name() string { return d.OptionalName } -func (d DefaultGen) Filter(*Context, *types.Type) bool { return true } -func (d DefaultGen) Namers(*Context) namer.NameSystems { return nil } -func (d DefaultGen) Imports(*Context) []string { return []string{} } -func (d DefaultGen) PackageVars(*Context) []string { return []string{} } -func (d DefaultGen) PackageConsts(*Context) []string { return []string{} } -func (d DefaultGen) GenerateType(*Context, *types.Type, io.Writer) error { return nil } -func (d DefaultGen) Filename() string { return d.OptionalName + ".go" } -func (d DefaultGen) FileType() string { return GolangFileType } -func (d DefaultGen) Finalize(*Context, io.Writer) error { return nil } - -func (d DefaultGen) Init(c *Context, w io.Writer) error { - _, err := w.Write(d.OptionalBody) - return err -} - -var ( - _ = Generator(DefaultGen{}) -) diff --git a/vendor/k8s.io/gengo/generator/default_package.go b/vendor/k8s.io/gengo/generator/default_package.go deleted file mode 100644 index dcf08832..00000000 --- a/vendor/k8s.io/gengo/generator/default_package.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generator - -import ( - "k8s.io/gengo/types" -) - -// DefaultPackage contains a default implementation of Package. -type DefaultPackage struct { - // Short name of package, used in the "package xxxx" line. - PackageName string - // Import path of the package, and the location on disk of the package. - PackagePath string - // The location of the package on disk. - Source string - - // Emitted at the top of every file. - HeaderText []byte - - // Emitted only for a "doc.go" file; appended to the HeaderText for - // that file. - PackageDocumentation []byte - - // If non-nil, will be called on "Generators"; otherwise, the static - // list will be used. So you should set only one of these two fields. - GeneratorFunc func(*Context) []Generator - GeneratorList []Generator - - // Optional; filters the types exposed to the generators. - FilterFunc func(*Context, *types.Type) bool -} - -func (d *DefaultPackage) Name() string { return d.PackageName } -func (d *DefaultPackage) Path() string { return d.PackagePath } -func (d *DefaultPackage) SourcePath() string { return d.Source } - -func (d *DefaultPackage) Filter(c *Context, t *types.Type) bool { - if d.FilterFunc != nil { - return d.FilterFunc(c, t) - } - return true -} - -func (d *DefaultPackage) Generators(c *Context) []Generator { - if d.GeneratorFunc != nil { - return d.GeneratorFunc(c) - } - return d.GeneratorList -} - -func (d *DefaultPackage) Header(filename string) []byte { - if filename == "doc.go" { - return append(d.HeaderText, d.PackageDocumentation...) 
- } - return d.HeaderText -} - -var ( - _ = Package(&DefaultPackage{}) -) diff --git a/vendor/k8s.io/gengo/generator/transitive_closure.go b/vendor/k8s.io/gengo/generator/transitive_closure.go deleted file mode 100644 index 385a49fc..00000000 --- a/vendor/k8s.io/gengo/generator/transitive_closure.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generator - -import "sort" - -type edge struct { - from string - to string -} - -func transitiveClosure(in map[string][]string) map[string][]string { - adj := make(map[edge]bool) - imports := make(map[string]struct{}) - for from, tos := range in { - for _, to := range tos { - adj[edge{from, to}] = true - imports[to] = struct{}{} - } - } - - // Warshal's algorithm - for k := range in { - for i := range in { - if !adj[edge{i, k}] { - continue - } - for j := range imports { - if adj[edge{i, j}] { - continue - } - if adj[edge{k, j}] { - adj[edge{i, j}] = true - } - } - } - } - - out := make(map[string][]string, len(in)) - for i := range in { - for j := range imports { - if adj[edge{i, j}] { - out[i] = append(out[i], j) - } - } - - sort.Strings(out[i]) - } - - return out -} diff --git a/vendor/k8s.io/gengo/parser/parse.go b/vendor/k8s.io/gengo/parser/parse.go deleted file mode 100644 index bbd71929..00000000 --- a/vendor/k8s.io/gengo/parser/parse.go +++ /dev/null @@ -1,925 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package parser - -import ( - "fmt" - "go/ast" - "go/build" - "go/constant" - "go/parser" - "go/token" - tc "go/types" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "sort" - "strings" - - "k8s.io/gengo/types" - "k8s.io/klog/v2" -) - -// This clarifies when a pkg path has been canonicalized. -type importPathString string - -// Builder lets you add all the go files in all the packages that you care -// about, then constructs the type source data. -type Builder struct { - context *build.Context - - // If true, include *_test.go - IncludeTestFiles bool - - // Map of package names to more canonical information about the package. - // This might hold the same value for multiple names, e.g. if someone - // referenced ./pkg/name or in the case of vendoring, which canonicalizes - // differently that what humans would type. 
- // - // This must only be accessed via getLoadedBuildPackage and setLoadedBuildPackage - buildPackages map[importPathString]*build.Package - - fset *token.FileSet - // map of package path to list of parsed files - parsed map[importPathString][]parsedFile - // map of package path to absolute path (to prevent overlap) - absPaths map[importPathString]string - - // Set by typeCheckPackage(), used by importPackage() and friends. - typeCheckedPackages map[importPathString]*tc.Package - - // Map of package path to whether the user requested it or it was from - // an import. - userRequested map[importPathString]bool - - // All comments from everywhere in every parsed file. - endLineToCommentGroup map[fileLine]*ast.CommentGroup - - // map of package to list of packages it imports. - importGraph map[importPathString]map[string]struct{} -} - -// parsedFile is for tracking files with name -type parsedFile struct { - name string - file *ast.File -} - -// key type for finding comments. -type fileLine struct { - file string - line int -} - -// New constructs a new builder. -func New() *Builder { - c := build.Default - if c.GOROOT == "" { - if p, err := exec.Command("which", "go").CombinedOutput(); err == nil { - // The returned string will have some/path/bin/go, so remove the last two elements. - c.GOROOT = filepath.Dir(filepath.Dir(strings.Trim(string(p), "\n"))) - } else { - klog.Warningf("Warning: $GOROOT not set, and unable to run `which go` to find it: %v\n", err) - } - } - // Force this to off, since we don't properly parse CGo. All symbols must - // have non-CGo equivalents. - c.CgoEnabled = false - return &Builder{ - context: &c, - buildPackages: map[importPathString]*build.Package{}, - typeCheckedPackages: map[importPathString]*tc.Package{}, - fset: token.NewFileSet(), - parsed: map[importPathString][]parsedFile{}, - absPaths: map[importPathString]string{}, - userRequested: map[importPathString]bool{}, - endLineToCommentGroup: map[fileLine]*ast.CommentGroup{}, - importGraph: map[importPathString]map[string]struct{}{}, - } -} - -// AddBuildTags adds the specified build tags to the parse context. -func (b *Builder) AddBuildTags(tags ...string) { - b.context.BuildTags = append(b.context.BuildTags, tags...) 
-} - -func (b *Builder) getLoadedBuildPackage(importPath string) (*build.Package, bool) { - canonicalized := canonicalizeImportPath(importPath) - if string(canonicalized) != importPath { - klog.V(5).Infof("getLoadedBuildPackage: %s normalized to %s", importPath, canonicalized) - } - buildPkg, ok := b.buildPackages[canonicalized] - return buildPkg, ok -} -func (b *Builder) setLoadedBuildPackage(importPath string, buildPkg *build.Package) { - canonicalizedImportPath := canonicalizeImportPath(importPath) - if string(canonicalizedImportPath) != importPath { - klog.V(5).Infof("setLoadedBuildPackage: importPath %s normalized to %s", importPath, canonicalizedImportPath) - } - - canonicalizedBuildPkgImportPath := canonicalizeImportPath(buildPkg.ImportPath) - if string(canonicalizedBuildPkgImportPath) != buildPkg.ImportPath { - klog.V(5).Infof("setLoadedBuildPackage: buildPkg.ImportPath %s normalized to %s", buildPkg.ImportPath, canonicalizedBuildPkgImportPath) - } - - if canonicalizedImportPath != canonicalizedBuildPkgImportPath { - klog.V(5).Infof("setLoadedBuildPackage: normalized importPath (%s) differs from buildPkg.ImportPath (%s)", canonicalizedImportPath, canonicalizedBuildPkgImportPath) - } - b.buildPackages[canonicalizedImportPath] = buildPkg - b.buildPackages[canonicalizedBuildPkgImportPath] = buildPkg -} - -// Get package information from the go/build package. Automatically excludes -// e.g. test files and files for other platforms-- there is quite a bit of -// logic of that nature in the build package. -func (b *Builder) importBuildPackage(dir string) (*build.Package, error) { - if buildPkg, ok := b.getLoadedBuildPackage(dir); ok { - return buildPkg, nil - } - // This validates the `package foo // github.com/bar/foo` comments. - buildPkg, err := b.importWithMode(dir, build.ImportComment) - if err != nil { - if _, ok := err.(*build.NoGoError); !ok { - return nil, fmt.Errorf("unable to import %q: %v", dir, err) - } - } - if buildPkg == nil { - // Might be an empty directory. Try to just find the dir. - buildPkg, err = b.importWithMode(dir, build.FindOnly) - if err != nil { - return nil, err - } - } - - // Remember it under the user-provided name. - klog.V(5).Infof("saving buildPackage %s", dir) - b.setLoadedBuildPackage(dir, buildPkg) - - return buildPkg, nil -} - -// AddFileForTest adds a file to the set, without verifying that the provided -// pkg actually exists on disk. The pkg must be of the form "canonical/pkg/path" -// and the path must be the absolute path to the file. Because this bypasses -// the normal recursive finding of package dependencies (on disk), test should -// sort their test files topologically first, so all deps are resolved by the -// time we need them. -func (b *Builder) AddFileForTest(pkg string, path string, src []byte) error { - if err := b.addFile(importPathString(pkg), path, src, true); err != nil { - return err - } - if _, err := b.typeCheckPackage(importPathString(pkg), true); err != nil { - return err - } - return nil -} - -// addFile adds a file to the set. The pkgPath must be of the form -// "canonical/pkg/path" and the path must be the absolute path to the file. A -// flag indicates whether this file was user-requested or just from following -// the import graph. 
-func (b *Builder) addFile(pkgPath importPathString, path string, src []byte, userRequested bool) error { - for _, p := range b.parsed[pkgPath] { - if path == p.name { - klog.V(5).Infof("addFile %s %s already parsed, skipping", pkgPath, path) - return nil - } - } - klog.V(6).Infof("addFile %s %s", pkgPath, path) - p, err := parser.ParseFile(b.fset, path, src, parser.DeclarationErrors|parser.ParseComments) - if err != nil { - return err - } - - // This is redundant with addDir, but some tests call AddFileForTest, which - // call into here without calling addDir. - b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath] - - b.parsed[pkgPath] = append(b.parsed[pkgPath], parsedFile{path, p}) - for _, c := range p.Comments { - position := b.fset.Position(c.End()) - b.endLineToCommentGroup[fileLine{position.Filename, position.Line}] = c - } - - // We have to get the packages from this specific file, in case the - // user added individual files instead of entire directories. - if b.importGraph[pkgPath] == nil { - b.importGraph[pkgPath] = map[string]struct{}{} - } - for _, im := range p.Imports { - importedPath := strings.Trim(im.Path.Value, `"`) - b.importGraph[pkgPath][importedPath] = struct{}{} - } - return nil -} - -// AddDir adds an entire directory, scanning it for go files. 'dir' should have -// a single go package in it. GOPATH, GOROOT, and the location of your go -// binary (`which go`) will all be searched if dir doesn't literally resolve. -func (b *Builder) AddDir(dir string) error { - _, err := b.importPackage(dir, true) - return err -} - -// AddDirRecursive is just like AddDir, but it also recursively adds -// subdirectories; it returns an error only if the path couldn't be resolved; -// any directories recursed into without go source are ignored. -func (b *Builder) AddDirRecursive(dir string) error { - // Add the root. - if _, err := b.importPackage(dir, true); err != nil { - klog.Warningf("Ignoring directory %v: %v", dir, err) - } - - // filepath.Walk does not follow symlinks. We therefore evaluate symlinks and use that with - // filepath.Walk. - buildPkg, ok := b.getLoadedBuildPackage(dir) - if !ok { - return fmt.Errorf("no loaded build package for %s", dir) - } - realPath, err := filepath.EvalSymlinks(buildPkg.Dir) - if err != nil { - return err - } - - fn := func(filePath string, info os.FileInfo, err error) error { - if info != nil && info.IsDir() { - rel := filepath.ToSlash(strings.TrimPrefix(filePath, realPath)) - if rel != "" { - // Make a pkg path. - buildPkg, ok := b.getLoadedBuildPackage(dir) - if !ok { - return fmt.Errorf("no loaded build package for %s", dir) - } - pkg := path.Join(string(canonicalizeImportPath(buildPkg.ImportPath)), rel) - - // Add it. - if _, err := b.importPackage(pkg, true); err != nil { - klog.Warningf("Ignoring child directory %v: %v", pkg, err) - } - } - } - return nil - } - if err := filepath.Walk(realPath, fn); err != nil { - return err - } - return nil -} - -// AddDirTo adds an entire directory to a given Universe. Unlike AddDir, this -// processes the package immediately, which makes it safe to use from within a -// generator (rather than just at init time. 'dir' must be a single go package. -// GOPATH, GOROOT, and the location of your go binary (`which go`) will all be -// searched if dir doesn't literally resolve. -// Deprecated. Please use AddDirectoryTo. -func (b *Builder) AddDirTo(dir string, u *types.Universe) error { - // We want all types from this package, as if they were directly added - // by the user. 
They WERE added by the user, in effect. - if _, err := b.importPackage(dir, true); err != nil { - return err - } - pkg, ok := b.getLoadedBuildPackage(dir) - if !ok { - return fmt.Errorf("no such package: %q", dir) - } - return b.findTypesIn(canonicalizeImportPath(pkg.ImportPath), u) -} - -// AddDirectoryTo adds an entire directory to a given Universe. Unlike AddDir, -// this processes the package immediately, which makes it safe to use from -// within a generator (rather than just at init time. 'dir' must be a single go -// package. GOPATH, GOROOT, and the location of your go binary (`which go`) -// will all be searched if dir doesn't literally resolve. -func (b *Builder) AddDirectoryTo(dir string, u *types.Universe) (*types.Package, error) { - // We want all types from this package, as if they were directly added - // by the user. They WERE added by the user, in effect. - if _, err := b.importPackage(dir, true); err != nil { - return nil, err - } - pkg, ok := b.getLoadedBuildPackage(dir) - if !ok || pkg == nil { - return nil, fmt.Errorf("no such package: %q", dir) - } - path := canonicalizeImportPath(pkg.ImportPath) - if err := b.findTypesIn(path, u); err != nil { - return nil, err - } - return u.Package(string(path)), nil -} - -// The implementation of AddDir. A flag indicates whether this directory was -// user-requested or just from following the import graph. -func (b *Builder) addDir(dir string, userRequested bool) error { - klog.V(5).Infof("addDir %s", dir) - buildPkg, err := b.importBuildPackage(dir) - if err != nil { - return err - } - canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) - pkgPath := canonicalPackage - if dir != string(canonicalPackage) { - klog.V(5).Infof("addDir %s, canonical path is %s", dir, pkgPath) - } - - // Sanity check the pkg dir has not changed. - if prev, found := b.absPaths[pkgPath]; found { - if buildPkg.Dir != prev { - return fmt.Errorf("package %q (%s) previously resolved to %s", pkgPath, buildPkg.Dir, prev) - } - } else { - b.absPaths[pkgPath] = buildPkg.Dir - } - - files := []string{} - files = append(files, buildPkg.GoFiles...) - if b.IncludeTestFiles { - files = append(files, buildPkg.TestGoFiles...) - } - - for _, file := range files { - if !strings.HasSuffix(file, ".go") { - continue - } - absPath := filepath.Join(buildPkg.Dir, file) - data, err := ioutil.ReadFile(absPath) - if err != nil { - return fmt.Errorf("while loading %q: %v", absPath, err) - } - err = b.addFile(pkgPath, absPath, data, userRequested) - if err != nil { - return fmt.Errorf("while parsing %q: %v", absPath, err) - } - } - return nil -} - -// regexErrPackageNotFound helps test the expected error for not finding a package. -var regexErrPackageNotFound = regexp.MustCompile(`^unable to import ".*?":.*`) - -func isErrPackageNotFound(err error) bool { - return regexErrPackageNotFound.MatchString(err.Error()) -} - -// importPackage is a function that will be called by the type check package when it -// needs to import a go package. 'path' is the import path. -func (b *Builder) importPackage(dir string, userRequested bool) (*tc.Package, error) { - klog.V(5).Infof("importPackage %s", dir) - - var pkgPath = importPathString(dir) - - // Get the canonical path if we can. - if buildPkg, _ := b.getLoadedBuildPackage(dir); buildPkg != nil { - canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) - klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage) - pkgPath = canonicalPackage - } - - // If we have not seen this before, process it now. 
- ignoreError := false - if _, found := b.parsed[pkgPath]; !found { - // Ignore errors in paths that we're importing solely because - // they're referenced by other packages. - ignoreError = true - - // Add it. - if err := b.addDir(dir, userRequested); err != nil { - if isErrPackageNotFound(err) { - klog.V(6).Info(err) - return nil, nil - } - - return nil, err - } - - // Get the canonical path now that it has been added. - if buildPkg, _ := b.getLoadedBuildPackage(dir); buildPkg != nil { - canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath) - klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage) - pkgPath = canonicalPackage - } - } - - // If it was previously known, just check that the user-requestedness hasn't - // changed. - b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath] - - // Run the type checker. We may end up doing this to pkgs that are already - // done, or are in the queue to be done later, but it will short-circuit, - // and we can't miss pkgs that are only depended on. - pkg, err := b.typeCheckPackage(pkgPath, !ignoreError) - if err != nil { - switch { - case ignoreError && pkg != nil: - klog.V(4).Infof("type checking encountered some issues in %q, but ignoring.\n", pkgPath) - case !ignoreError && pkg != nil: - klog.V(3).Infof("type checking encountered some errors in %q\n", pkgPath) - return nil, err - default: - return nil, err - } - } - - return pkg, nil -} - -type importAdapter struct { - b *Builder -} - -func (a importAdapter) Import(path string) (*tc.Package, error) { - return a.b.importPackage(path, false) -} - -// typeCheckPackage will attempt to return the package even if there are some -// errors, so you may check whether the package is nil or not even if you get -// an error. -func (b *Builder) typeCheckPackage(pkgPath importPathString, logErr bool) (*tc.Package, error) { - klog.V(5).Infof("typeCheckPackage %s", pkgPath) - if pkg, ok := b.typeCheckedPackages[pkgPath]; ok { - if pkg != nil { - klog.V(6).Infof("typeCheckPackage %s already done", pkgPath) - return pkg, nil - } - // We store a nil right before starting work on a package. So - // if we get here and it's present and nil, that means there's - // another invocation of this function on the call stack - // already processing this package. - return nil, fmt.Errorf("circular dependency for %q", pkgPath) - } - parsedFiles, ok := b.parsed[pkgPath] - if !ok { - return nil, fmt.Errorf("No files for pkg %q", pkgPath) - } - files := make([]*ast.File, len(parsedFiles)) - for i := range parsedFiles { - files[i] = parsedFiles[i].file - } - b.typeCheckedPackages[pkgPath] = nil - c := tc.Config{ - IgnoreFuncBodies: true, - // Note that importAdapter can call b.importPackage which calls this - // method. So there can't be cycles in the import graph. - Importer: importAdapter{b}, - Error: func(err error) { - if logErr { - klog.V(2).Infof("type checker: %v\n", err) - } else { - klog.V(3).Infof("type checker: %v\n", err) - } - }, - } - pkg, err := c.Check(string(pkgPath), b.fset, files, nil) - b.typeCheckedPackages[pkgPath] = pkg // record the result whether or not there was an error - return pkg, err -} - -// FindPackages fetches a list of the user-imported packages. -// Note that you need to call b.FindTypes() first. -func (b *Builder) FindPackages() []string { - // Iterate packages in a predictable order. 
- pkgPaths := []string{} - for k := range b.typeCheckedPackages { - pkgPaths = append(pkgPaths, string(k)) - } - sort.Strings(pkgPaths) - - result := []string{} - for _, pkgPath := range pkgPaths { - if b.userRequested[importPathString(pkgPath)] { - // Since walkType is recursive, all types that are in packages that - // were directly mentioned will be included. We don't need to - // include all types in all transitive packages, though. - result = append(result, pkgPath) - } - } - return result -} - -// FindTypes finalizes the package imports, and searches through all the -// packages for types. -func (b *Builder) FindTypes() (types.Universe, error) { - // Take a snapshot of pkgs to iterate, since this will recursively mutate - // b.parsed. Iterate in a predictable order. - pkgPaths := []string{} - for pkgPath := range b.parsed { - pkgPaths = append(pkgPaths, string(pkgPath)) - } - sort.Strings(pkgPaths) - - u := types.Universe{} - for _, pkgPath := range pkgPaths { - if err := b.findTypesIn(importPathString(pkgPath), &u); err != nil { - return nil, err - } - } - return u, nil -} - -// addCommentsToType takes any accumulated comment lines prior to obj and -// attaches them to the type t. -func (b *Builder) addCommentsToType(obj tc.Object, t *types.Type) { - c1 := b.priorCommentLines(obj.Pos(), 1) - // c1.Text() is safe if c1 is nil - t.CommentLines = splitLines(c1.Text()) - if c1 == nil { - t.SecondClosestCommentLines = splitLines(b.priorCommentLines(obj.Pos(), 2).Text()) - } else { - t.SecondClosestCommentLines = splitLines(b.priorCommentLines(c1.List[0].Slash, 2).Text()) - } -} - -// findTypesIn finalizes the package import and searches through the package -// for types. -func (b *Builder) findTypesIn(pkgPath importPathString, u *types.Universe) error { - klog.V(5).Infof("findTypesIn %s", pkgPath) - pkg := b.typeCheckedPackages[pkgPath] - if pkg == nil { - return fmt.Errorf("findTypesIn(%s): package is not known", pkgPath) - } - if !b.userRequested[pkgPath] { - // Since walkType is recursive, all types that the - // packages they asked for depend on will be included. - // But we don't need to include all types in all - // *packages* they depend on. - klog.V(5).Infof("findTypesIn %s: package is not user requested", pkgPath) - return nil - } - - // We're keeping this package. This call will create the record. - u.Package(string(pkgPath)).Name = pkg.Name() - u.Package(string(pkgPath)).Path = pkg.Path() - u.Package(string(pkgPath)).SourcePath = b.absPaths[pkgPath] - - for _, f := range b.parsed[pkgPath] { - if _, fileName := filepath.Split(f.name); fileName == "doc.go" { - tp := u.Package(string(pkgPath)) - // findTypesIn might be called multiple times. Clean up tp.Comments - // to avoid repeatedly fill same comments to it. - tp.Comments = []string{} - for i := range f.file.Comments { - tp.Comments = append(tp.Comments, splitLines(f.file.Comments[i].Text())...) - } - if f.file.Doc != nil { - tp.DocComments = splitLines(f.file.Doc.Text()) - } - } - } - - s := pkg.Scope() - for _, n := range s.Names() { - obj := s.Lookup(n) - tn, ok := obj.(*tc.TypeName) - if ok { - t := b.walkType(*u, nil, tn.Type()) - b.addCommentsToType(obj, t) - } - tf, ok := obj.(*tc.Func) - // We only care about functions, not concrete/abstract methods. 
- if ok && tf.Type() != nil && tf.Type().(*tc.Signature).Recv() == nil { - t := b.addFunction(*u, nil, tf) - b.addCommentsToType(obj, t) - } - tv, ok := obj.(*tc.Var) - if ok && !tv.IsField() { - t := b.addVariable(*u, nil, tv) - b.addCommentsToType(obj, t) - } - tconst, ok := obj.(*tc.Const) - if ok { - t := b.addConstant(*u, nil, tconst) - b.addCommentsToType(obj, t) - } - } - - importedPkgs := []string{} - for k := range b.importGraph[pkgPath] { - importedPkgs = append(importedPkgs, string(k)) - } - sort.Strings(importedPkgs) - for _, p := range importedPkgs { - u.AddImports(string(pkgPath), p) - } - return nil -} - -func (b *Builder) importWithMode(dir string, mode build.ImportMode) (*build.Package, error) { - // This is a bit of a hack. The srcDir argument to Import() should - // properly be the dir of the file which depends on the package to be - // imported, so that vendoring can work properly and local paths can - // resolve. We assume that there is only one level of vendoring, and that - // the CWD is inside the GOPATH, so this should be safe. Nobody should be - // using local (relative) paths except on the CLI, so CWD is also - // sufficient. - cwd, err := os.Getwd() - if err != nil { - return nil, fmt.Errorf("unable to get current directory: %v", err) - } - - // normalize to drop /vendor/ if present - dir = string(canonicalizeImportPath(dir)) - - buildPkg, err := b.context.Import(filepath.ToSlash(dir), cwd, mode) - if err != nil { - return nil, err - } - return buildPkg, nil -} - -// if there's a comment on the line `lines` before pos, return its text, otherwise "". -func (b *Builder) priorCommentLines(pos token.Pos, lines int) *ast.CommentGroup { - position := b.fset.Position(pos) - key := fileLine{position.Filename, position.Line - lines} - return b.endLineToCommentGroup[key] -} - -func splitLines(str string) []string { - return strings.Split(strings.TrimRight(str, "\n"), "\n") -} - -func tcFuncNameToName(in string) types.Name { - name := strings.TrimPrefix(in, "func ") - nameParts := strings.Split(name, "(") - return tcNameToName(nameParts[0]) -} - -func tcVarNameToName(in string) types.Name { - nameParts := strings.Split(in, " ") - // nameParts[0] is "var". - // nameParts[2:] is the type of the variable, we ignore it for now. - return tcNameToName(nameParts[1]) -} - -func tcNameToName(in string) types.Name { - // Detect anonymous type names. (These may have '.' characters because - // embedded types may have packages, so we detect them specially.) - if strings.HasPrefix(in, "struct{") || - strings.HasPrefix(in, "<-chan") || - strings.HasPrefix(in, "chan<-") || - strings.HasPrefix(in, "chan ") || - strings.HasPrefix(in, "func(") || - strings.HasPrefix(in, "func (") || - strings.HasPrefix(in, "*") || - strings.HasPrefix(in, "map[") || - strings.HasPrefix(in, "[") { - return types.Name{Name: in} - } - - // Otherwise, if there are '.' characters present, the name has a - // package path in front. - nameParts := strings.Split(in, ".") - name := types.Name{Name: in} - if n := len(nameParts); n >= 2 { - // The final "." is the name of the type--previous ones must - // have been in the package path. 
- name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1] - } - return name -} - -func (b *Builder) convertSignature(u types.Universe, t *tc.Signature) *types.Signature { - signature := &types.Signature{} - for i := 0; i < t.Params().Len(); i++ { - signature.Parameters = append(signature.Parameters, b.walkType(u, nil, t.Params().At(i).Type())) - signature.ParameterNames = append(signature.ParameterNames, t.Params().At(i).Name()) - } - for i := 0; i < t.Results().Len(); i++ { - signature.Results = append(signature.Results, b.walkType(u, nil, t.Results().At(i).Type())) - signature.ResultNames = append(signature.ResultNames, t.Results().At(i).Name()) - } - if r := t.Recv(); r != nil { - signature.Receiver = b.walkType(u, nil, r.Type()) - } - signature.Variadic = t.Variadic() - return signature -} - -// walkType adds the type, and any necessary child types. -func (b *Builder) walkType(u types.Universe, useName *types.Name, in tc.Type) *types.Type { - // Most of the cases are underlying types of the named type. - name := tcNameToName(in.String()) - if useName != nil { - name = *useName - } - - switch t := in.(type) { - case *tc.Struct: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Struct - for i := 0; i < t.NumFields(); i++ { - f := t.Field(i) - m := types.Member{ - Name: f.Name(), - Embedded: f.Anonymous(), - Tags: t.Tag(i), - Type: b.walkType(u, nil, f.Type()), - CommentLines: splitLines(b.priorCommentLines(f.Pos(), 1).Text()), - } - out.Members = append(out.Members, m) - } - return out - case *tc.Map: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Map - out.Elem = b.walkType(u, nil, t.Elem()) - out.Key = b.walkType(u, nil, t.Key()) - return out - case *tc.Pointer: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Pointer - out.Elem = b.walkType(u, nil, t.Elem()) - return out - case *tc.Slice: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Slice - out.Elem = b.walkType(u, nil, t.Elem()) - return out - case *tc.Array: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Array - out.Elem = b.walkType(u, nil, t.Elem()) - out.Len = in.(*tc.Array).Len() - return out - case *tc.Chan: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Chan - out.Elem = b.walkType(u, nil, t.Elem()) - // TODO: need to store direction, otherwise raw type name - // cannot be properly written. 
- return out - case *tc.Basic: - out := u.Type(types.Name{ - Package: "", - Name: t.Name(), - }) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Unsupported - return out - case *tc.Signature: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Func - out.Signature = b.convertSignature(u, t) - return out - case *tc.Interface: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Interface - t.Complete() - for i := 0; i < t.NumMethods(); i++ { - if out.Methods == nil { - out.Methods = map[string]*types.Type{} - } - method := t.Method(i) - name := tcNameToName(method.String()) - mt := b.walkType(u, &name, method.Type()) - mt.CommentLines = splitLines(b.priorCommentLines(method.Pos(), 1).Text()) - out.Methods[method.Name()] = mt - } - return out - case *tc.Named: - var out *types.Type - switch t.Underlying().(type) { - case *tc.Named, *tc.Basic, *tc.Map, *tc.Slice: - name := tcNameToName(t.String()) - out = u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Alias - out.Underlying = b.walkType(u, nil, t.Underlying()) - default: - // tc package makes everything "named" with an - // underlying anonymous type--we remove that annoying - // "feature" for users. This flattens those types - // together. - name := tcNameToName(t.String()) - if out := u.Type(name); out.Kind != types.Unknown { - return out // short circuit if we've already made this. - } - out = b.walkType(u, &name, t.Underlying()) - } - // If the underlying type didn't already add methods, add them. - // (Interface types will have already added methods.) - if len(out.Methods) == 0 { - for i := 0; i < t.NumMethods(); i++ { - if out.Methods == nil { - out.Methods = map[string]*types.Type{} - } - method := t.Method(i) - name := tcNameToName(method.String()) - mt := b.walkType(u, &name, method.Type()) - mt.CommentLines = splitLines(b.priorCommentLines(method.Pos(), 1).Text()) - out.Methods[method.Name()] = mt - } - } - return out - default: - out := u.Type(name) - if out.Kind != types.Unknown { - return out - } - out.Kind = types.Unsupported - klog.Warningf("Making unsupported type entry %q for: %#v\n", out, t) - return out - } -} - -func (b *Builder) addFunction(u types.Universe, useName *types.Name, in *tc.Func) *types.Type { - name := tcFuncNameToName(in.String()) - if useName != nil { - name = *useName - } - out := u.Function(name) - out.Kind = types.DeclarationOf - out.Underlying = b.walkType(u, nil, in.Type()) - return out -} - -func (b *Builder) addVariable(u types.Universe, useName *types.Name, in *tc.Var) *types.Type { - name := tcVarNameToName(in.String()) - if useName != nil { - name = *useName - } - out := u.Variable(name) - out.Kind = types.DeclarationOf - out.Underlying = b.walkType(u, nil, in.Type()) - return out -} - -func (b *Builder) addConstant(u types.Universe, useName *types.Name, in *tc.Const) *types.Type { - name := tcVarNameToName(in.String()) - if useName != nil { - name = *useName - } - out := u.Constant(name) - out.Kind = types.DeclarationOf - out.Underlying = b.walkType(u, nil, in.Type()) - - var constval string - - // For strings, we use `StringVal()` to get the un-truncated, - // un-quoted string. For other values, `.String()` is preferable to - // get something relatively human readable (especially since for - // floating point types, `ExactString()` will generate numeric - // expressions using `big.(*Float).Text()`. 
- switch in.Val().Kind() { - case constant.String: - constval = constant.StringVal(in.Val()) - default: - constval = in.Val().String() - } - - out.ConstValue = &constval - return out -} - -// canonicalizeImportPath takes an import path and returns the actual package. -// It doesn't support nested vendoring. -func canonicalizeImportPath(importPath string) importPathString { - if !strings.Contains(importPath, "/vendor/") { - return importPathString(importPath) - } - - return importPathString(importPath[strings.Index(importPath, "/vendor/")+len("/vendor/"):]) -} diff --git a/vendor/k8s.io/gengo/types/flatten.go b/vendor/k8s.io/gengo/types/flatten.go deleted file mode 100644 index 585014e8..00000000 --- a/vendor/k8s.io/gengo/types/flatten.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -// FlattenMembers recursively takes any embedded members and puts them in the -// top level, correctly hiding them if the top level hides them. There must not -// be a cycle-- that implies infinite members. -// -// This is useful for e.g. computing all the valid keys in a json struct, -// properly considering any configuration of embedded structs. -func FlattenMembers(m []Member) []Member { - embedded := []Member{} - normal := []Member{} - type nameInfo struct { - top bool - i int - } - names := map[string]nameInfo{} - for i := range m { - if m[i].Embedded && m[i].Type.Kind == Struct { - embedded = append(embedded, m[i]) - } else { - normal = append(normal, m[i]) - names[m[i].Name] = nameInfo{true, len(normal) - 1} - } - } - for i := range embedded { - for _, e := range FlattenMembers(embedded[i].Type.Members) { - if info, found := names[e.Name]; found { - if info.top { - continue - } - if n := normal[info.i]; n.Name == e.Name && n.Type == e.Type { - continue - } - panic("conflicting members") - } - normal = append(normal, e) - names[e.Name] = nameInfo{false, len(normal) - 1} - } - } - return normal -} diff --git a/vendor/k8s.io/gengo/LICENSE b/vendor/k8s.io/gengo/v2/LICENSE similarity index 100% rename from vendor/k8s.io/gengo/LICENSE rename to vendor/k8s.io/gengo/v2/LICENSE diff --git a/vendor/k8s.io/gengo/v2/README.md b/vendor/k8s.io/gengo/v2/README.md new file mode 100644 index 00000000..79d1070d --- /dev/null +++ b/vendor/k8s.io/gengo/v2/README.md @@ -0,0 +1,53 @@ +[![GoDoc Widget]][GoDoc] [![GoReport]][GoReportStatus] + +[GoDoc]: https://godoc.org/k8s.io/gengo +[GoDoc Widget]: https://godoc.org/k8s.io/gengo?status.svg +[GoReport]: https://goreportcard.com/badge/github.com/kubernetes/gengo +[GoReportStatus]: https://goreportcard.com/report/github.com/kubernetes/gengo + +# Gengo: a framework for building simple code generators + +This repo is used by Kubernetes to build some codegen tooling. It is not +intended to be general-purpose and makes some assumptions that may not hold +outside of Kubernetes. 
+ +In the past this repo was partially supported for external use (outside of the +Kubernetes project overall), but that is no longer true. We may change the API +in incompatible ways, without warning. + +If you are not building something that is part of Kubernetes, DO NOT DEPEND ON +THIS REPO. + +## New usage within Kubernetes + +Gengo is a very opinionated framework. It is primarily aimed at generating Go +code derived from types defined in other Go code, but it is possible to use it +for other things (e.g. proto files). Net new tools should consider using +`golang.org/x/tools/go/packages` directly. Gengo can serve as an example of +how to do that. + +If you still decide you want to use gengo, see the +[simple examples](./examples) in this repo or the more extensive tools in the +Kubernetes [code-generator](https://github.com/kubernetes/code-generator/) +repo. + +## Overview + +Gengo is used to build tools (generally a tool is a binary). Each tool +describes some number of `Targets`. A target is a single output package, which +may be the same as the inputs (if the tool generates code alongside the inputs) +or different. Each `Target` describes some number of `Generators`. A +generator is responsible for emitting a single file into the target directory. + +Gengo helps the tool to load and process input packages, e.g. extracting type +information and associating comments. Each target will be offered every known +type, and can filter that down to the set of types it cares about. Each +generator will be offered the result of the target's filtering, and can filter +the set of types further. Finally, the generator will be called to emit code +for all of the remaining types. + +The `tracer` example in this repo can be used to examine all of the hooks. + +## Contributing + +Please see [CONTRIBUTING.md](CONTRIBUTING.md) for instructions on how to contribute. diff --git a/vendor/k8s.io/gengo/types/comments.go b/vendor/k8s.io/gengo/v2/comments.go similarity index 86% rename from vendor/k8s.io/gengo/types/comments.go rename to vendor/k8s.io/gengo/v2/comments.go index 8150c383..ba49c432 100644 --- a/vendor/k8s.io/gengo/types/comments.go +++ b/vendor/k8s.io/gengo/v2/comments.go @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package types contains go type information, packaged in a way that makes -// auto-generation convenient, whether by template or straight go functions. -package types +package gengo import ( "fmt" @@ -25,7 +23,7 @@ import ( // ExtractCommentTags parses comments for lines of the form: // -// 'marker' + "key=value". +// 'marker' + "key=value". // // Values are optional; "" is the default. A tag can be specified more than // one time and all values are returned. 
If the resulting map has an entry for @@ -33,12 +31,15 @@ import ( // // Example: if you pass "+" for 'marker', and the following lines are in // the comments: -// +foo=value1 -// +bar -// +foo=value2 -// +baz="qux" +// +// +foo=value1 +// +bar +// +foo=value2 +// +baz="qux" +// // Then this function will return: -// map[string][]string{"foo":{"value1, "value2"}, "bar": {""}, "baz": {"qux"}} +// +// map[string][]string{"foo":{"value1, "value2"}, "bar": {""}, "baz": {"qux"}} func ExtractCommentTags(marker string, lines []string) map[string][]string { out := map[string][]string{} for _, line := range lines { @@ -62,7 +63,7 @@ func ExtractCommentTags(marker string, lines []string) map[string][]string { // ExtractSingleBoolCommentTag parses comments for lines of the form: // -// 'marker' + "key=value1" +// 'marker' + "key=value1" // // If the tag is not found, the default value is returned. Values are asserted // to be boolean ("true" or "false"), and any other value will cause an error diff --git a/vendor/k8s.io/gengo/v2/execute.go b/vendor/k8s.io/gengo/v2/execute.go new file mode 100644 index 00000000..c4aba2b1 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/execute.go @@ -0,0 +1,98 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package gengo is a code-generation framework. +package gengo + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/parser" +) + +// StdBuildTag is a suggested build-tag which tools can use both as an argument +// to GoBoilerplate and to Execute. +const StdBuildTag = "ignore_autogenerated" + +// StdGeneratedBy is a suggested "generated by" line which tools can use as an +// argument to GoBoilerplate. +const StdGeneratedBy = "// Code generated by GENERATOR_NAME. DO NOT EDIT." + +// GoBoilerplate returns the Go file header: +// - an optional build tag (negative, set it to ignore generated code) +// - an optional boilerplate file +// - an optional "generated by" comment +func GoBoilerplate(headerFile, buildTag, generatedBy string) ([]byte, error) { + buf := bytes.Buffer{} + + if buildTag != "" { + buf.WriteString( + fmt.Sprintf("//go:build !%s\n// +build !%s\n\n", buildTag, buildTag)) + } + + if headerFile != "" { + b, err := os.ReadFile(headerFile) + if err != nil { + return nil, err + } + b = bytes.ReplaceAll(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().UTC().Year()))) + buf.Write(b) + buf.WriteByte('\n') + } + + if generatedBy != "" { + generatorName := filepath.Base(os.Args[0]) + // Strip the extension from the name to normalize output between *nix and Windows. + generatorName = generatorName[:len(generatorName)-len(filepath.Ext(generatorName))] + generatedByComment := strings.ReplaceAll(generatedBy, "GENERATOR_NAME", generatorName) + buf.WriteString(fmt.Sprintf("%s\n\n", generatedByComment)) + } + + return buf.Bytes(), nil +} + +// Execute implements most of a tool's main loop. 
+func Execute(nameSystems namer.NameSystems, defaultSystem string, getTargets func(*generator.Context) []generator.Target, buildTag string, patterns []string) error { + var buildTags []string + if buildTag != "" { + buildTags = append(buildTags, buildTag) + } + + p := parser.NewWithOptions(parser.Options{BuildTags: buildTags}) + if err := p.LoadPackages(patterns...); err != nil { + return fmt.Errorf("failed making a parser: %v", err) + } + + c, err := generator.NewContext(p, nameSystems, defaultSystem) + if err != nil { + return fmt.Errorf("failed making a context: %v", err) + } + + targets := getTargets(c) + if err := c.ExecuteTargets(targets); err != nil { + return fmt.Errorf("failed executing generator: %v", err) + } + + return nil +} diff --git a/vendor/k8s.io/gengo/generator/doc.go b/vendor/k8s.io/gengo/v2/generator/doc.go similarity index 96% rename from vendor/k8s.io/gengo/generator/doc.go rename to vendor/k8s.io/gengo/v2/generator/doc.go index d8e12534..ef0031cd 100644 --- a/vendor/k8s.io/gengo/generator/doc.go +++ b/vendor/k8s.io/gengo/v2/generator/doc.go @@ -28,4 +28,4 @@ limitations under the License. // package. Additionally, all naming systems in the Context will be added as // functions to the parsed template, so that they can be called directly from // your templates! -package generator // import "k8s.io/gengo/generator" +package generator // import "k8s.io/gengo/v2/generator" diff --git a/vendor/k8s.io/gengo/generator/error_tracker.go b/vendor/k8s.io/gengo/v2/generator/error_tracker.go similarity index 100% rename from vendor/k8s.io/gengo/generator/error_tracker.go rename to vendor/k8s.io/gengo/v2/generator/error_tracker.go diff --git a/vendor/k8s.io/gengo/generator/execute.go b/vendor/k8s.io/gengo/v2/generator/execute.go similarity index 54% rename from vendor/k8s.io/gengo/generator/execute.go rename to vendor/k8s.io/gengo/v2/generator/execute.go index f096741b..02b4c331 100644 --- a/vendor/k8s.io/gengo/generator/execute.go +++ b/vendor/k8s.io/gengo/v2/generator/execute.go @@ -18,42 +18,31 @@ package generator import ( "bytes" + "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" "golang.org/x/tools/imports" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" - + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) -func errs2strings(errors []error) []string { - strs := make([]string, len(errors)) - for i := range errors { - strs[i] = errors[i].Error() - } - return strs -} +// ExecuteTargets runs the generators for the provided targets. +func (c *Context) ExecuteTargets(targets []Target) error { + klog.V(5).Infof("ExecuteTargets: %d targets", len(targets)) -// ExecutePackages runs the generators for every package in 'packages'. 'outDir' -// is the base directory in which to place all the generated packages; it -// should be a physical path on disk, not an import path. e.g.: -// /path/to/home/path/to/gopath/src/ -// Each package has its import path already, this will be appended to 'outDir'. 
-func (c *Context) ExecutePackages(outDir string, packages Packages) error { - var errors []error - for _, p := range packages { - if err := c.ExecutePackage(outDir, p); err != nil { - errors = append(errors, err) + var errs []error + for _, tgt := range targets { + if err := c.ExecuteTarget(tgt); err != nil { + errs = append(errs, err) } } - if len(errors) > 0 { - return fmt.Errorf("some packages had errors:\n%v\n", strings.Join(errs2strings(errors), "\n")) + if len(errs) > 0 { + return fmt.Errorf("some targets had errors: %w", errors.Join(errs...)) } return nil } @@ -65,6 +54,7 @@ type DefaultFileType struct { func (ft DefaultFileType) AssembleFile(f *File, pathname string) error { klog.V(5).Infof("Assembling file %q", pathname) + destFile, err := os.Create(pathname) if err != nil { return err @@ -78,7 +68,7 @@ func (ft DefaultFileType) AssembleFile(f *File, pathname string) error { return et.Error() } if formatted, err := ft.Format(b.Bytes()); err != nil { - err = fmt.Errorf("unable to format file %q (%v).", pathname, err) + err = fmt.Errorf("unable to format file %q (%v)", pathname, err) // Write the file anyway, so they can see what's going wrong and fix the generator. if _, err2 := destFile.Write(b.Bytes()); err2 != nil { return err2 @@ -90,42 +80,7 @@ func (ft DefaultFileType) AssembleFile(f *File, pathname string) error { } } -func (ft DefaultFileType) VerifyFile(f *File, pathname string) error { - klog.V(5).Infof("Verifying file %q", pathname) - friendlyName := filepath.Join(f.PackageName, f.Name) - b := &bytes.Buffer{} - et := NewErrorTracker(b) - ft.Assemble(et, f) - if et.Error() != nil { - return et.Error() - } - formatted, err := ft.Format(b.Bytes()) - if err != nil { - return fmt.Errorf("unable to format the output for %q: %v", friendlyName, err) - } - existing, err := ioutil.ReadFile(pathname) - if err != nil { - return fmt.Errorf("unable to read file %q for comparison: %v", friendlyName, err) - } - if bytes.Compare(formatted, existing) == 0 { - return nil - } - // Be nice and find the first place where they differ - i := 0 - for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] { - i++ - } - eDiff, fDiff := existing[i:], formatted[i:] - if len(eDiff) > 100 { - eDiff = eDiff[:100] - } - if len(fDiff) > 100 { - fDiff = fDiff[:100] - } - return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", friendlyName, string(eDiff), string(fDiff)) -} - -func assembleGolangFile(w io.Writer, f *File) { +func assembleGoFile(w io.Writer, f *File) { w.Write(f.Header) fmt.Fprintf(w, "package %v\n\n", f.PackageName) @@ -162,10 +117,10 @@ func importsWrapper(src []byte) ([]byte, error) { return imports.Process("", src, nil) } -func NewGolangFile() *DefaultFileType { +func NewGoFile() *DefaultFileType { return &DefaultFileType{ Format: importsWrapper, - Assemble: assembleGolangFile, + Assemble: assembleGoFile, } } @@ -208,33 +163,23 @@ func (c *Context) addNameSystems(namers namer.NameSystems) *Context { return &c2 } -// ExecutePackage executes a single package. 'outDir' is the base directory in -// which to place the package; it should be a physical path on disk, not an -// import path. e.g.: '/path/to/home/path/to/gopath/src/' The package knows its -// import path already, this will be appended to 'outDir'. -func (c *Context) ExecutePackage(outDir string, p Package) error { - path := filepath.Join(outDir, p.Path()) +// ExecuteTarget runs the generators for a single target. 
+func (c *Context) ExecuteTarget(tgt Target) error { + tgtDir := tgt.Dir() + if tgtDir == "" { + return fmt.Errorf("no directory for target %s", tgt.Path()) + } + klog.V(5).Infof("Executing target %q (%q)", tgt.Name(), tgtDir) - // When working outside of GOPATH, we typically won't want to generate the - // full path for a package. For example, if our current project's root/base - // package is github.com/foo/bar, outDir=., p.Path()=github.com/foo/bar/generated, - // then we really want to be writing files to ./generated, not ./github.com/foo/bar/generated. - // The following will trim a path prefix (github.com/foo/bar) from p.Path() to arrive at - // a relative path that works with projects not in GOPATH. - if c.TrimPathPrefix != "" { - separator := string(filepath.Separator) - if !strings.HasSuffix(c.TrimPathPrefix, separator) { - c.TrimPathPrefix += separator - } + // Filter out any types the *package* doesn't care about. + packageContext := c.filteredBy(tgt.Filter) - path = strings.TrimPrefix(path, c.TrimPathPrefix) + if err := os.MkdirAll(tgtDir, 0755); err != nil { + return err } - klog.V(5).Infof("Processing package %q, disk location %q", p.Name(), path) - // Filter out any types the *package* doesn't care about. - packageContext := c.filteredBy(p.Filter) - os.MkdirAll(path, 0755) + files := map[string]*File{} - for _, g := range p.Generators(packageContext) { + for _, g := range tgt.Generators(packageContext) { // Filter out types the *generator* doesn't care about. genContext := packageContext.filteredBy(g.Filter) // Now add any extra name systems defined by this generator @@ -248,19 +193,17 @@ func (c *Context) ExecutePackage(outDir string, p Package) error { if f == nil { // This is the first generator to reference this file, so start it. 
f = &File{ - Name: g.Filename(), - FileType: fileType, - PackageName: p.Name(), - PackagePath: p.Path(), - PackageSourcePath: p.SourcePath(), - Header: p.Header(g.Filename()), - Imports: map[string]struct{}{}, + Name: g.Filename(), + FileType: fileType, + PackageName: tgt.Name(), + PackagePath: tgt.Path(), + PackageDir: tgt.Dir(), + Header: tgt.Header(g.Filename()), + Imports: map[string]struct{}{}, } files[f.Name] = f - } else { - if f.FileType != g.FileType() { - return fmt.Errorf("file %q already has type %q, but generator %q wants to use type %q", f.Name, f.FileType, g.Name(), g.FileType()) - } + } else if f.FileType != g.FileType() { + return fmt.Errorf("file %q already has type %q, but generator %q wants to use type %q", f.Name, f.FileType, g.Name(), g.FileType()) } if vars := g.PackageVars(genContext); len(vars) > 0 { @@ -289,25 +232,19 @@ func (c *Context) ExecutePackage(outDir string, p Package) error { } } - var errors []error + var errs []error for _, f := range files { - finalPath := filepath.Join(path, f.Name) + finalPath := filepath.Join(tgtDir, f.Name) assembler, ok := c.FileTypes[f.FileType] if !ok { return fmt.Errorf("the file type %q registered for file %q does not exist in the context", f.FileType, f.Name) } - var err error - if c.Verify { - err = assembler.VerifyFile(f, finalPath) - } else { - err = assembler.AssembleFile(f, finalPath) - } - if err != nil { - errors = append(errors, err) + if err := assembler.AssembleFile(f, finalPath); err != nil { + errs = append(errs, err) } } - if len(errors) > 0 { - return fmt.Errorf("errors in package %q:\n%v\n", p.Path(), strings.Join(errs2strings(errors), "\n")) + if len(errs) > 0 { + return fmt.Errorf("errors in target %q: %w", tgt.Path(), errors.Join(errs...)) } return nil } diff --git a/vendor/k8s.io/gengo/generator/generator.go b/vendor/k8s.io/gengo/v2/generator/generator.go similarity index 63% rename from vendor/k8s.io/gengo/generator/generator.go rename to vendor/k8s.io/gengo/v2/generator/generator.go index 5614ae3b..7dfb1b2b 100644 --- a/vendor/k8s.io/gengo/generator/generator.go +++ b/vendor/k8s.io/gengo/v2/generator/generator.go @@ -20,19 +20,22 @@ import ( "bytes" "io" - "k8s.io/gengo/namer" - "k8s.io/gengo/parser" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/parser" + "k8s.io/gengo/v2/types" ) -// Package contains the contract for generating a package. -type Package interface { - // Name returns the package short name. +// Target describes a Go package into which code will be generated. A single +// Target may have many Generators, each of which emits one file. +type Target interface { + // Name returns the package short name (as in `package foo`). Name() string - // Path returns the package import path. + // Path returns the package import path (as in `import "example.com/foo"`). Path() string - // SourcePath returns the location of the package on disk. - SourcePath() string + // Dir returns the location of the resulting package on disk. This may be + // the same directory as an input package (when generating code in-place) + // or a different directory entirely. + Dir() string // Filter should return true if this package cares about this type. 
// Otherwise, this type will be omitted from the type ordering for @@ -52,26 +55,22 @@ type Package interface { } type File struct { - Name string - FileType string - PackageName string - Header []byte - PackagePath string - PackageSourcePath string - Imports map[string]struct{} - Vars bytes.Buffer - Consts bytes.Buffer - Body bytes.Buffer + Name string + FileType string + PackageName string + Header []byte + PackagePath string + PackageDir string + Imports map[string]struct{} + Vars bytes.Buffer + Consts bytes.Buffer + Body bytes.Buffer } type FileType interface { AssembleFile(f *File, path string) error - VerifyFile(f *File, path string) error } -// Packages is a list of packages to generate. -type Packages []Package - // Generator is the contract for anything that wants to do auto-generation. // It's expected that the io.Writers passed to the below functions will be // ErrorTrackers; this allows implementations to not check for io errors, @@ -110,7 +109,7 @@ type Generator interface { // Init should write an init function, and any other content that's not // generated per-type. (It's not intended for generator specific - // initialization! Do that when your Package constructs the + // initialization! Do that when your Target constructs the // Generators.) Init(*Context, io.Writer) error @@ -160,38 +159,25 @@ type Context struct { // All the types, in case you want to look up something. Universe types.Universe - // Incoming imports, i.e. packages importing the given package. - incomingImports map[string][]string - - // Incoming transitive imports, i.e. the transitive closure of IncomingImports - incomingTransitiveImports map[string][]string - // All the user-specified packages. This is after recursive expansion. Inputs []string // The canonical ordering of the types (will be filtered by both the - // Package's and Generator's Filter methods). + // Target's and Generator's Filter methods). Order []*types.Type // A set of types this context can process. If this is empty or nil, - // the default "golang" filetype will be provided. + // the default "go" filetype will be provided. FileTypes map[string]FileType - // If true, Execute* calls will just verify that the existing output is - // correct. (You may set this after calling NewContext.) - Verify bool - // Allows generators to add packages at runtime. - builder *parser.Builder - - // If specified, trim the prefix from a package's path before writing files. - TrimPathPrefix string + parser *parser.Parser } -// NewContext generates a context from the given builder, naming systems, and +// NewContext generates a context from the given parser, naming systems, and // the naming system you wish to construct the canonical ordering from. 
-func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrderName string) (*Context, error) { - universe, err := b.FindTypes() +func NewContext(p *parser.Parser, nameSystems namer.NameSystems, canonicalOrderName string) (*Context, error) { + universe, err := p.NewUniverse() if err != nil { return nil, err } @@ -199,11 +185,11 @@ func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrder c := &Context{ Namers: namer.NameSystems{}, Universe: universe, - Inputs: b.FindPackages(), + Inputs: p.UserRequestedPackages(), FileTypes: map[string]FileType{ - GolangFileType: NewGolangFile(), + GoFileType: NewGoFile(), }, - builder: b, + parser: p, } for name, systemNamer := range nameSystems { @@ -216,44 +202,13 @@ func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrder return c, nil } -// IncomingImports returns the incoming imports for each package. The map is lazily computed. -func (ctxt *Context) IncomingImports() map[string][]string { - if ctxt.incomingImports == nil { - incoming := map[string][]string{} - for _, pkg := range ctxt.Universe { - for imp := range pkg.Imports { - incoming[imp] = append(incoming[imp], pkg.Path) - } - } - ctxt.incomingImports = incoming - } - return ctxt.incomingImports -} - -// TransitiveIncomingImports returns the transitive closure of the incoming imports for each package. -// The map is lazily computed. -func (ctxt *Context) TransitiveIncomingImports() map[string][]string { - if ctxt.incomingTransitiveImports == nil { - ctxt.incomingTransitiveImports = transitiveClosure(ctxt.IncomingImports()) - } - return ctxt.incomingTransitiveImports -} - -// AddDir adds a Go package to the context. The specified path must be a single -// go package import path. GOPATH, GOROOT, and the location of your go binary -// (`which go`) will all be searched, in the normal Go fashion. -// Deprecated. Please use AddDirectory. -func (ctxt *Context) AddDir(path string) error { - ctxt.incomingImports = nil - ctxt.incomingTransitiveImports = nil - return ctxt.builder.AddDirTo(path, &ctxt.Universe) +// LoadPackages adds Go packages to the context. +func (c *Context) LoadPackages(patterns ...string) ([]*types.Package, error) { + return c.parser.LoadPackagesTo(&c.Universe, patterns...) } -// AddDirectory adds a Go package to the context. The specified path must be a -// single go package import path. GOPATH, GOROOT, and the location of your go -// binary (`which go`) will all be searched, in the normal Go fashion. -func (ctxt *Context) AddDirectory(path string) (*types.Package, error) { - ctxt.incomingImports = nil - ctxt.incomingTransitiveImports = nil - return ctxt.builder.AddDirectoryTo(path, &ctxt.Universe) +// FindPackages expands Go package patterns into a list of package import +// paths, akin to `go list -find`. +func (c *Context) FindPackages(patterns ...string) ([]string, error) { + return c.parser.FindPackages(patterns...) } diff --git a/vendor/k8s.io/gengo/v2/generator/go_generator.go b/vendor/k8s.io/gengo/v2/generator/go_generator.go new file mode 100644 index 00000000..14d2148b --- /dev/null +++ b/vendor/k8s.io/gengo/v2/generator/go_generator.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
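Tools that do not go through the one-shot Execute helper can drive the Parser and Context pair directly, as sketched below. The calls are the ones shown in this diff (parser.New, Parser.LoadPackages, generator.NewContext, Context.FindPackages, Context.LoadPackages); the package patterns are placeholders.

package main

import (
	"fmt"

	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/namer"
	"k8s.io/gengo/v2/parser"
)

func main() {
	p := parser.New()
	// Parse the primary inputs up front; this is the expensive step.
	if err := p.LoadPackages("./pkg/apis/..."); err != nil {
		panic(err)
	}

	c, err := generator.NewContext(p, namer.NameSystems{"public": namer.NewPublicNamer(0)}, "public")
	if err != nil {
		panic(err)
	}

	// Later additions go through the Context, which shares the same Parser
	// and Universe. FindPackages only expands patterns; LoadPackages parses.
	extra, err := c.FindPackages("./pkg/extra/...")
	if err != nil {
		panic(err)
	}
	fmt.Println("also loading:", extra)
	if _, err := c.LoadPackages(extra...); err != nil {
		panic(err)
	}
}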
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "io" + + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" +) + +const ( + GoFileType = "go" +) + +// GoGenerator implements a do-nothing Generator for Go files. It can be +// used as a base for custom Generators, which embed it and then define the +// methods they need to specialize. +type GoGenerator struct { + // OutputFilename is used as the Generator's name, and filename. + OutputFilename string + + // Body, if present, will be used as the return from the "Init" method. + // This causes it to be static content for the entire file if no other + // generator touches the file. + OptionalBody []byte +} + +func (gg GoGenerator) Name() string { return gg.OutputFilename } +func (gg GoGenerator) Filter(*Context, *types.Type) bool { return true } +func (gg GoGenerator) Namers(*Context) namer.NameSystems { return nil } +func (gg GoGenerator) Imports(*Context) []string { return []string{} } +func (gg GoGenerator) PackageVars(*Context) []string { return []string{} } +func (gg GoGenerator) PackageConsts(*Context) []string { return []string{} } +func (gg GoGenerator) GenerateType(*Context, *types.Type, io.Writer) error { return nil } +func (gg GoGenerator) Filename() string { return gg.OutputFilename } +func (gg GoGenerator) FileType() string { return GoFileType } +func (gg GoGenerator) Finalize(*Context, io.Writer) error { return nil } + +func (gg GoGenerator) Init(c *Context, w io.Writer) error { + _, err := w.Write(gg.OptionalBody) + return err +} + +var ( + _ = Generator(GoGenerator{}) +) diff --git a/vendor/k8s.io/gengo/generator/import_tracker.go b/vendor/k8s.io/gengo/v2/generator/import_tracker.go similarity index 55% rename from vendor/k8s.io/gengo/generator/import_tracker.go rename to vendor/k8s.io/gengo/v2/generator/import_tracker.go index 60c899ac..70b86cf5 100644 --- a/vendor/k8s.io/gengo/generator/import_tracker.go +++ b/vendor/k8s.io/gengo/v2/generator/import_tracker.go @@ -22,22 +22,41 @@ import ( "k8s.io/klog/v2" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" ) -func NewImportTracker(typesToAdd ...*types.Type) namer.ImportTracker { - tracker := namer.NewDefaultImportTracker(types.Name{}) +// NewImportTrackerForPackage creates a new import tracker which is aware +// of a generator's output package. The tracker will not add import lines +// when symbols or types are added from the same package, and LocalNameOf +// will return empty string for the output package. 
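GoGenerator is intended to be embedded, so a concrete generator only overrides the hooks it needs; everything else (Name, FileType, Init, Imports, and so on) falls through to the defaults above. A sketch with an invented generator and output filename:

package generators

import (
	"fmt"
	"io"

	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/types"
)

// typeListGenerator emits one declaration per struct type it is shown.
type typeListGenerator struct {
	generator.GoGenerator
}

// Filter narrows the types that reach GenerateType to structs only.
func (g typeListGenerator) Filter(c *generator.Context, t *types.Type) bool {
	return t.Kind == types.Struct
}

// GenerateType is called once per surviving type.
func (g typeListGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
	_, err := fmt.Fprintf(w, "var _ = %q // known struct type\n", t.Name.Name)
	return err
}

func newTypeListGenerator() generator.Generator {
	return typeListGenerator{
		GoGenerator: generator.GoGenerator{OutputFilename: "zz_generated.typelist.go"},
	}
}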
+// +// e.g.: +// +// tracker := NewImportTrackerForPackage("bar.com/pkg/foo") +// tracker.AddSymbol(types.Name{"bar.com/pkg/foo.MyType"}) +// tracker.AddSymbol(types.Name{"bar.com/pkg/baz.MyType"}) +// tracker.AddSymbol(types.Name{"bar.com/pkg/baz/baz.MyType"}) +// +// tracker.LocalNameOf("bar.com/pkg/foo") -> "" +// tracker.LocalNameOf("bar.com/pkg/baz") -> "baz" +// tracker.LocalNameOf("bar.com/pkg/baz/baz") -> "bazbaz" +// tracker.ImportLines() -> {`baz "bar.com/pkg/baz"`, `bazbaz "bar.com/pkg/baz/baz"`} +func NewImportTrackerForPackage(local string, typesToAdd ...*types.Type) *namer.DefaultImportTracker { + tracker := namer.NewDefaultImportTracker(types.Name{Package: local}) tracker.IsInvalidType = func(*types.Type) bool { return false } - tracker.LocalName = func(name types.Name) string { return golangTrackerLocalName(&tracker, name) } + tracker.LocalName = func(name types.Name) string { return goTrackerLocalName(&tracker, name) } tracker.PrintImport = func(path, name string) string { return name + " \"" + path + "\"" } tracker.AddTypes(typesToAdd...) return &tracker +} +func NewImportTracker(typesToAdd ...*types.Type) *namer.DefaultImportTracker { + return NewImportTrackerForPackage("", typesToAdd...) } -func golangTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { +func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { path := t.Package // Using backslashes in package names causes gengo to produce Go code which @@ -50,11 +69,11 @@ func golangTrackerLocalName(tracker namer.ImportTracker, t types.Name) string { for n := len(dirs) - 1; n >= 0; n-- { // follow kube convention of not having anything between directory names name := strings.Join(dirs[n:], "") - name = strings.Replace(name, "_", "", -1) + name = strings.ReplaceAll(name, "_", "") // These characters commonly appear in import paths for go // packages, but aren't legal go names. So we'll sanitize. - name = strings.Replace(name, ".", "", -1) - name = strings.Replace(name, "-", "", -1) + name = strings.ReplaceAll(name, ".", "") + name = strings.ReplaceAll(name, "-", "") if _, found := tracker.PathOf(name); found { // This name collides with some other package continue diff --git a/vendor/k8s.io/gengo/v2/generator/simple_target.go b/vendor/k8s.io/gengo/v2/generator/simple_target.go new file mode 100644 index 00000000..34df8245 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/generator/simple_target.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generator + +import ( + "k8s.io/gengo/v2/types" +) + +// SimpleTarget is implements Target in terms of static configuration. +// The package name, path, and dir are required to be non-empty. +type SimpleTarget struct { + // PkgName is the name of the resulting package (as in "package xxxx"). + // Required. + PkgName string + // PkgPath is the canonical Go import-path of the resulting package (as in + // "import example.com/xxxx/yyyy"). Required. 
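The usual consumer of this tracker is a generator that pairs it with a raw namer and reports the accumulated imports back to the framework. The sketch below assumes namer.NewRawNamer keeps its existing (package path, import tracker) signature, which this diff does not show; the output package and filename are illustrative.

package generators

import (
	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/namer"
)

type copyGenerator struct {
	generator.GoGenerator
	outputPackage string                      // e.g. "example.com/pkg/generated"
	imports       *namer.DefaultImportTracker // from NewImportTrackerForPackage
}

// Namers exposes a "raw" namer that renders type names relative to the output
// package, registering every foreign package with the tracker as it goes.
func (g copyGenerator) Namers(c *generator.Context) namer.NameSystems {
	return namer.NameSystems{
		"raw": namer.NewRawNamer(g.outputPackage, g.imports), // assumed signature
	}
}

// Imports is consulted when the file is assembled, so the generated file ends
// up importing exactly what its snippets referenced.
func (g copyGenerator) Imports(c *generator.Context) []string {
	return g.imports.ImportLines()
}

func newCopyGenerator(outPkg string) generator.Generator {
	return copyGenerator{
		GoGenerator:   generator.GoGenerator{OutputFilename: "zz_generated.copy.go"},
		outputPackage: outPkg,
		imports:       generator.NewImportTrackerForPackage(outPkg),
	}
}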
+ PkgPath string + // PkgDir is the location of the resulting package on disk (which may not + // exist yet). It may be absolute or relative to CWD. Required. + PkgDir string + + // HeaderComment is emitted at the top of every output file. Optional. + HeaderComment []byte + + // PkgDocComment is emitted after the header comment for a "doc.go" file. + // Optional. + PkgDocComment []byte + + // FilterFunc will be called to implement Target.Filter. Optional. + FilterFunc func(*Context, *types.Type) bool + + // GeneratorsFunc will be called to implement Target.Generators. Optional. + GeneratorsFunc func(*Context) []Generator +} + +func (st SimpleTarget) Name() string { return st.PkgName } +func (st SimpleTarget) Path() string { return st.PkgPath } +func (st SimpleTarget) Dir() string { return st.PkgDir } + +func (st SimpleTarget) Filter(c *Context, t *types.Type) bool { + if st.FilterFunc != nil { + return st.FilterFunc(c, t) + } + return true +} + +func (st SimpleTarget) Generators(c *Context) []Generator { + if st.GeneratorsFunc != nil { + return st.GeneratorsFunc(c) + } + return nil +} + +func (st SimpleTarget) Header(filename string) []byte { + if filename == "doc.go" { + return append(st.HeaderComment, st.PkgDocComment...) + } + return st.HeaderComment +} + +var ( + _ = Target(SimpleTarget{}) +) diff --git a/vendor/k8s.io/gengo/generator/snippet_writer.go b/vendor/k8s.io/gengo/v2/generator/snippet_writer.go similarity index 88% rename from vendor/k8s.io/gengo/generator/snippet_writer.go rename to vendor/k8s.io/gengo/v2/generator/snippet_writer.go index 590775ff..7f4610c0 100644 --- a/vendor/k8s.io/gengo/generator/snippet_writer.go +++ b/vendor/k8s.io/gengo/v2/generator/snippet_writer.go @@ -74,15 +74,15 @@ func NewSnippetWriter(w io.Writer, c *Context, left, right string) *SnippetWrite // return sw.Error() // // Where: -// * "$" starts a template directive -// * "." references the entire thing passed as args -// * "type" therefore sees a map and looks up the key "type" -// * "|" means "pass the thing on the left to the thing on the right" -// * "public" is the name of a naming system, so the SnippetWriter has given -// the template a function called "public" that takes a *types.Type and -// returns the naming system's name. E.g., if the type is "string" this might -// return "String". -// * the second "$" ends the template directive. +// - "$" starts a template directive +// - "." references the entire thing passed as args +// - "type" therefore sees a map and looks up the key "type" +// - "|" means "pass the thing on the left to the thing on the right" +// - "public" is the name of a naming system, so the SnippetWriter has given +// the template a function called "public" that takes a *types.Type and +// returns the naming system's name. E.g., if the type is "string" this might +// return "String". +// - the second "$" ends the template directive. // // The map is actually not necessary. The below does the same thing: // diff --git a/vendor/k8s.io/gengo/namer/doc.go b/vendor/k8s.io/gengo/v2/namer/doc.go similarity index 96% rename from vendor/k8s.io/gengo/namer/doc.go rename to vendor/k8s.io/gengo/v2/namer/doc.go index 8a44ea99..76309ebb 100644 --- a/vendor/k8s.io/gengo/namer/doc.go +++ b/vendor/k8s.io/gengo/v2/namer/doc.go @@ -28,4 +28,4 @@ limitations under the License. // // Additionally, a "RawNamer" can optionally keep track of what needs to be // imported. 
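Putting SimpleTarget together with the snippet writer documented just above: a sketch of a getTargets function of the shape the Execute entry point earlier in this diff expects. The generated String method, header text, and paths are invented for illustration; the "$" template syntax and the "public" name system are as documented above.

package generators

import (
	"io"

	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/types"
)

type stringerGenerator struct {
	generator.GoGenerator
}

func (g stringerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
	sw := generator.NewSnippetWriter(w, c, "$", "$")
	// "public" must be one of the Context's name systems for this to resolve.
	sw.Do("func (in $.type|public$) String() string { return \"$.type|public$\" }\n\n",
		map[string]interface{}{"type": t})
	return sw.Error()
}

// GetTargets has the shape expected by gengo's Execute helper.
func GetTargets(c *generator.Context) []generator.Target {
	return []generator.Target{
		generator.SimpleTarget{
			PkgName:       "generated",
			PkgPath:       "example.com/pkg/generated",
			PkgDir:        "./pkg/generated",
			HeaderComment: []byte("// Code generated by example-gen. DO NOT EDIT.\n\n"),
			FilterFunc: func(c *generator.Context, t *types.Type) bool {
				return t.Kind == types.Struct // only structs get a String method
			},
			GeneratorsFunc: func(c *generator.Context) []generator.Generator {
				return []generator.Generator{
					stringerGenerator{generator.GoGenerator{OutputFilename: "zz_generated.stringer.go"}},
				}
			},
		},
	}
}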
-package namer // import "k8s.io/gengo/namer" +package namer // import "k8s.io/gengo/v2/namer" diff --git a/vendor/k8s.io/gengo/namer/import_tracker.go b/vendor/k8s.io/gengo/v2/namer/import_tracker.go similarity index 90% rename from vendor/k8s.io/gengo/namer/import_tracker.go rename to vendor/k8s.io/gengo/v2/namer/import_tracker.go index 37094b2d..f8c5a941 100644 --- a/vendor/k8s.io/gengo/namer/import_tracker.go +++ b/vendor/k8s.io/gengo/v2/namer/import_tracker.go @@ -19,7 +19,7 @@ package namer import ( "sort" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/types" ) // ImportTracker may be passed to a namer.RawNamer, to track the imports needed @@ -55,6 +55,27 @@ func (tracker *DefaultImportTracker) AddTypes(types ...*types.Type) { tracker.AddType(t) } } +func (tracker *DefaultImportTracker) AddSymbol(symbol types.Name) { + if tracker.local.Package == symbol.Package { + return + } + + if len(symbol.Package) == 0 { + return + } + path := symbol.Path + if len(path) == 0 { + path = symbol.Package + } + if _, ok := tracker.pathToName[path]; ok { + return + } + + name := tracker.LocalName(symbol) + tracker.nameToPath[name] = path + tracker.pathToName[path] = name +} + func (tracker *DefaultImportTracker) AddType(t *types.Type) { if tracker.local.Package == t.Name.Package { return @@ -70,19 +91,7 @@ func (tracker *DefaultImportTracker) AddType(t *types.Type) { return } - if len(t.Name.Package) == 0 { - return - } - path := t.Name.Path - if len(path) == 0 { - path = t.Name.Package - } - if _, ok := tracker.pathToName[path]; ok { - return - } - name := tracker.LocalName(t.Name) - tracker.nameToPath[name] = path - tracker.pathToName[path] = name + tracker.AddSymbol(t.Name) } func (tracker *DefaultImportTracker) ImportLines() []string { @@ -90,7 +99,7 @@ func (tracker *DefaultImportTracker) ImportLines() []string { for path := range tracker.pathToName { importPaths = append(importPaths, path) } - sort.Sort(sort.StringSlice(importPaths)) + sort.Strings(importPaths) out := []string{} for _, path := range importPaths { out = append(out, tracker.PrintImport(path, tracker.pathToName[path])) diff --git a/vendor/k8s.io/gengo/namer/namer.go b/vendor/k8s.io/gengo/v2/namer/namer.go similarity index 99% rename from vendor/k8s.io/gengo/namer/namer.go rename to vendor/k8s.io/gengo/v2/namer/namer.go index 6feb2d0c..e82fe66a 100644 --- a/vendor/k8s.io/gengo/namer/namer.go +++ b/vendor/k8s.io/gengo/v2/namer/namer.go @@ -22,7 +22,7 @@ import ( "strconv" "strings" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/types" ) const ( @@ -300,6 +300,7 @@ func (ns *NameStrategy) Name(t *types.Type) string { // import. You can implement yourself or use the one in the generation package. type ImportTracker interface { AddType(*types.Type) + AddSymbol(types.Name) LocalNameOf(packagePath string) string PathOf(localName string) (string, bool) ImportLines() []string diff --git a/vendor/k8s.io/gengo/namer/order.go b/vendor/k8s.io/gengo/v2/namer/order.go similarity index 98% rename from vendor/k8s.io/gengo/namer/order.go rename to vendor/k8s.io/gengo/v2/namer/order.go index fd89be9b..e676f011 100644 --- a/vendor/k8s.io/gengo/namer/order.go +++ b/vendor/k8s.io/gengo/v2/namer/order.go @@ -19,7 +19,7 @@ package namer import ( "sort" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/types" ) // Orderer produces an ordering of types given a Namer. 
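AddSymbol is the new, type-free way to register an import (AddType now simply funnels into it). A short sketch of the observable behavior, with made-up package paths; the exact sanitized local name depends on collisions.

package main

import (
	"fmt"

	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/types"
)

func main() {
	// Track imports for a file that will live in example.com/pkg/out.
	tracker := generator.NewImportTrackerForPackage("example.com/pkg/out")

	// Symbols can be registered without having a *types.Type in hand.
	tracker.AddSymbol(types.Name{Package: "example.com/pkg/out", Name: "Local"})   // same package: no import
	tracker.AddSymbol(types.Name{Package: "example.com/pkg/util", Name: "Helper"}) // foreign: tracked

	fmt.Println(tracker.LocalNameOf("example.com/pkg/util")) // "util", barring collisions
	for _, line := range tracker.ImportLines() {
		fmt.Println(line) // e.g. util "example.com/pkg/util"
	}
}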
diff --git a/vendor/k8s.io/gengo/namer/plural_namer.go b/vendor/k8s.io/gengo/v2/namer/plural_namer.go similarity index 99% rename from vendor/k8s.io/gengo/namer/plural_namer.go rename to vendor/k8s.io/gengo/v2/namer/plural_namer.go index 0e3ebbf2..6bded6a0 100644 --- a/vendor/k8s.io/gengo/namer/plural_namer.go +++ b/vendor/k8s.io/gengo/v2/namer/plural_namer.go @@ -19,7 +19,7 @@ package namer import ( "strings" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/types" ) var consonants = "bcdfghjklmnpqrstvwxyz" diff --git a/vendor/k8s.io/gengo/parser/doc.go b/vendor/k8s.io/gengo/v2/parser/doc.go similarity index 92% rename from vendor/k8s.io/gengo/parser/doc.go rename to vendor/k8s.io/gengo/v2/parser/doc.go index 8231b6d4..8dc84fac 100644 --- a/vendor/k8s.io/gengo/parser/doc.go +++ b/vendor/k8s.io/gengo/v2/parser/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package parser provides code to parse go files, type-check them, extract the // types. -package parser // import "k8s.io/gengo/parser" +package parser // import "k8s.io/gengo/v2/parser" diff --git a/vendor/k8s.io/gengo/v2/parser/parse.go b/vendor/k8s.io/gengo/v2/parser/parse.go new file mode 100644 index 00000000..a5993d16 --- /dev/null +++ b/vendor/k8s.io/gengo/v2/parser/parse.go @@ -0,0 +1,821 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parser + +import ( + "errors" + "fmt" + "go/ast" + "go/constant" + "go/token" + gotypes "go/types" + "path/filepath" + "sort" + "strings" + "time" + + "golang.org/x/tools/go/packages" + "k8s.io/gengo/v2/types" + "k8s.io/klog/v2" +) + +// Parser lets you add all the go files in all the packages that you care +// about, then constructs the type source data. +type Parser struct { + // Map of package paths to definitions. These keys should be canonical + // Go import paths (example.com/foo/bar) and not local paths (./foo/bar). + goPkgs map[string]*packages.Package + + // Keep track of which packages were directly requested (as opposed to + // those which are transitively loaded). + userRequested map[string]bool + + // Keep track of which packages have already been scanned for types. + fullyProcessed map[string]bool + + // Build tags to set when loading packages. + buildTags []string + + // Tracks accumulated parsed files, so we can do position lookups later. + fset *token.FileSet + + // All comments from everywhere in every parsed file. This map is keyed by + // the file-line on which the comment block ends, which makes it easy to + // look up comments which immediately precede a given obect (e.g. a type or + // function definition), which is what we almost always want. We need this + // because Go's own ast package does a very poor job of handling comments. + endLineToCommentGroup map[fileLine]*ast.CommentGroup +} + +// key type for finding comments. +type fileLine struct { + file string + line int +} + +// New constructs a new Parser. 
+func New() *Parser { + return NewWithOptions(Options{}) +} + +func NewWithOptions(opts Options) *Parser { + return &Parser{ + goPkgs: map[string]*packages.Package{}, + userRequested: map[string]bool{}, + fullyProcessed: map[string]bool{}, + fset: token.NewFileSet(), + endLineToCommentGroup: map[fileLine]*ast.CommentGroup{}, + buildTags: opts.BuildTags, + } +} + +// Options holds optional settings for the Parser. +type Options struct { + // BuildTags is a list of optional tags to be specified when loading + // packages. + BuildTags []string +} + +// FindPackages expands the provided patterns into a list of Go import-paths, +// much like `go list -find`. +func (p *Parser) FindPackages(patterns ...string) ([]string, error) { + return p.findPackages(nil, patterns...) +} + +// baseCfg is an optional (may be nil) config which might be injected by tests. +func (p *Parser) findPackages(baseCfg *packages.Config, patterns ...string) ([]string, error) { + toFind := make([]string, 0, len(patterns)) + results := make([]string, 0, len(patterns)) + for _, pat := range patterns { + if pkg := p.goPkgs[pat]; pkg != nil { + results = append(results, pkg.PkgPath) + } else { + toFind = append(toFind, pat) + } + } + if len(toFind) == 0 { + return results, nil + } + + cfg := packages.Config{ + Mode: packages.NeedName | packages.NeedFiles, + BuildFlags: []string{"-tags", strings.Join(p.buildTags, ",")}, + Tests: false, + } + if baseCfg != nil { + // This is to support tests, e.g. to inject a fake GOPATH or CWD. + cfg.Dir = baseCfg.Dir + cfg.Env = baseCfg.Env + } + + pkgs, err := packages.Load(&cfg, toFind...) + if err != nil { + return nil, fmt.Errorf("error loading packages: %w", err) + } + var allErrs []error + for _, pkg := range pkgs { + results = append(results, pkg.PkgPath) + + // pkg.Errors is not a slice of `error`, but concrete types. We have + // to iteratively convert each one into `error`. + var errs []error + for _, e := range pkg.Errors { + errs = append(errs, e) + } + if len(errs) > 0 { + allErrs = append(allErrs, fmt.Errorf("error(s) in %q:\n%w", pkg.PkgPath, errors.Join(errs...))) + } + } + if len(allErrs) != 0 { + return nil, errors.Join(allErrs...) + } + return results, nil +} + +// LoadPackages loads and parses the specified Go packages. Specifically +// named packages (without a trailing "/...") which do not exist or have no Go +// files are an error. +func (p *Parser) LoadPackages(patterns ...string) error { + _, err := p.loadPackages(patterns...) + return err +} + +// LoadPackagesWithConfigForTesting loads and parses the specified Go packages with the +// specified packages.Config as a starting point. This is for testing, and +// only the .Dir and .Env fields of the Config will be considered. +func (p *Parser) LoadPackagesWithConfigForTesting(cfg *packages.Config, patterns ...string) error { + _, err := p.loadPackagesWithConfig(cfg, patterns...) + return err +} + +// LoadPackagesTo loads and parses the specified Go packages, and inserts them +// into the specified Universe. It returns the packages which match the +// patterns, but loads all packages and their imports, recursively, into the +// universe. See NewUniverse for more. +func (p *Parser) LoadPackagesTo(u *types.Universe, patterns ...string) ([]*types.Package, error) { + // Load Packages. + pkgs, err := p.loadPackages(patterns...) + if err != nil { + return nil, err + } + + // Load types in all packages (it will internally filter). 
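End to end, the new Parser is used roughly as below; the build tag and patterns are placeholders. FindPackages only expands patterns (cheap), while LoadPackages parses and type-checks, and marks the results as user-requested so a later NewUniverse will scan their types.

package main

import (
	"fmt"

	"k8s.io/gengo/v2/parser"
)

func main() {
	p := parser.NewWithOptions(parser.Options{BuildTags: []string{"ignore_autogenerated"}})

	// Expand the patterns without parsing anything.
	paths, err := p.FindPackages("./pkg/apis/...")
	if err != nil {
		panic(err)
	}
	fmt.Println("matched:", paths)

	// Now actually load (parse + type-check) the matched packages.
	if err := p.LoadPackages(paths...); err != nil {
		panic(err)
	}
	fmt.Println("user-requested:", p.UserRequestedPackages())
}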
+ if err := p.addPkgsToUniverse(pkgs, u); err != nil { + return nil, err + } + + // Return the results as gengo types.Packages. + ret := make([]*types.Package, 0, len(pkgs)) + for _, pkg := range pkgs { + ret = append(ret, u.Package(pkg.PkgPath)) + } + + return ret, nil +} + +func (p *Parser) loadPackages(patterns ...string) ([]*packages.Package, error) { + return p.loadPackagesWithConfig(nil, patterns...) +} + +// baseCfg is an optional (may be nil) config which might be injected by tests. +func (p *Parser) loadPackagesWithConfig(baseCfg *packages.Config, patterns ...string) ([]*packages.Package, error) { + klog.V(5).Infof("loadPackages %q", patterns) + + // Loading packages is slow - only do ones we know we have not already done + // (e.g. if a tool calls LoadPackages itself). + existingPkgs, netNewPkgs, err := p.alreadyLoaded(baseCfg, patterns...) + if err != nil { + return nil, err + } + if vlog := klog.V(5); vlog.Enabled() { + if len(existingPkgs) > 0 { + keys := make([]string, 0, len(existingPkgs)) + for _, p := range existingPkgs { + keys = append(keys, p.PkgPath) + } + vlog.Infof(" already have: %q", keys) + } + if len(netNewPkgs) > 0 { + vlog.Infof(" to be loaded: %q", netNewPkgs) + } + } + + // If these were not user-requested before, they are now. + for _, pkg := range existingPkgs { + if !p.userRequested[pkg.PkgPath] { + p.userRequested[pkg.PkgPath] = true + } + } + for _, pkg := range netNewPkgs { + if !p.userRequested[pkg] { + p.userRequested[pkg] = true + } + } + + if len(netNewPkgs) == 0 { + return existingPkgs, nil + } + + cfg := packages.Config{ + Mode: packages.NeedName | + packages.NeedFiles | packages.NeedImports | packages.NeedDeps | + packages.NeedModule | packages.NeedTypes | packages.NeedSyntax, + BuildFlags: []string{"-tags", strings.Join(p.buildTags, ",")}, + Fset: p.fset, + Tests: false, + } + if baseCfg != nil { + // This is to support tests, e.g. to inject a fake GOPATH or CWD. + cfg.Dir = baseCfg.Dir + cfg.Env = baseCfg.Env + } + + tBefore := time.Now() + pkgs, err := packages.Load(&cfg, netNewPkgs...) + if err != nil { + return nil, fmt.Errorf("error loading packages: %w", err) + } + klog.V(5).Infof(" loaded %d pkg(s) in %v", len(pkgs), time.Since(tBefore)) + + // Handle any errors. + collectErrors := func(pkg *packages.Package) error { + var errs []error + for _, e := range pkg.Errors { + if e.Kind == packages.ListError || e.Kind == packages.ParseError { + errs = append(errs, e) + } + } + if len(errs) > 0 { + return fmt.Errorf("error(s) in %q:\n%w", pkg.PkgPath, errors.Join(errs...)) + } + return nil + } + if err := forEachPackageRecursive(pkgs, collectErrors); err != nil { + return nil, err + } + + // Finish integrating packages into our state. + absorbPkg := func(pkg *packages.Package) error { + p.goPkgs[pkg.PkgPath] = pkg + + for _, f := range pkg.Syntax { + for _, c := range f.Comments { + // We need to do this on _every_ pkg, not just user-requested + // ones, because some generators look at tags in other + // packages. + // + // TODO: It would be nice if we only did this on user-requested + // packages. The problem is that we don't always know which + // other packages will need this information, and even when we + // do we may have already loaded the package (as a transitive + // dep) and might have stored pointers into it. Doing a + // thorough "reload" without invalidating all those pointers is + // a problem for another day. 
+ position := p.fset.Position(c.End()) // Fset is synchronized + p.endLineToCommentGroup[fileLine{position.Filename, position.Line}] = c + } + } + + return nil + } + if err := forEachPackageRecursive(pkgs, absorbPkg); err != nil { + return nil, err + } + + return append(existingPkgs, pkgs...), nil +} + +// alreadyLoaded figures out which of the specified patterns have already been loaded +// and which have not, and returns those respectively. +// baseCfg is an optional (may be nil) config which might be injected by tests. +func (p *Parser) alreadyLoaded(baseCfg *packages.Config, patterns ...string) ([]*packages.Package, []string, error) { + existingPkgs := make([]*packages.Package, 0, len(patterns)) + netNewPkgs := make([]string, 0, len(patterns)) + + // Expand and canonicalize the requested patterns. This should be fast. + if pkgPaths, err := p.findPackages(baseCfg, patterns...); err != nil { + return nil, nil, err + } else { + for _, pkgPath := range pkgPaths { + if pkg := p.goPkgs[pkgPath]; pkg != nil { + existingPkgs = append(existingPkgs, pkg) + } else { + netNewPkgs = append(netNewPkgs, pkgPath) + } + } + } + return existingPkgs, netNewPkgs, nil +} + +// forEachPackageRecursive will run the provided function on all of the specified +// packages, and on their imports recursively. Errors are accumulated and +// returned as via errors.Join. +func forEachPackageRecursive(pkgs []*packages.Package, fn func(pkg *packages.Package) error) error { + seen := map[string]bool{} // PkgPaths we have already visited + var errs []error + for _, pkg := range pkgs { + errs = append(errs, recursePackage(pkg, fn, seen)...) + } + if len(errs) > 0 { + return errors.Join(errs...) + } + return nil +} + +func recursePackage(pkg *packages.Package, fn func(pkg *packages.Package) error, seen map[string]bool) []error { + if seen[pkg.PkgPath] { + return nil + } + var errs []error + seen[pkg.PkgPath] = true + if err := fn(pkg); err != nil { + errs = append(errs, err) + } + for _, imp := range pkg.Imports { + errs = append(errs, recursePackage(imp, fn, seen)...) + } + return errs +} + +// UserRequestedPackages fetches a list of the user-imported packages. +func (p *Parser) UserRequestedPackages() []string { + // Iterate packages in a predictable order. + pkgPaths := make([]string, 0, len(p.userRequested)) + for k := range p.userRequested { + pkgPaths = append(pkgPaths, string(k)) + } + sort.Strings(pkgPaths) + return pkgPaths +} + +// NewUniverse finalizes the loaded packages, searches through them for types +// and produces a new Universe. The returned Universe has one types.Package +// entry for each Go package that has been loaded, including all of their +// dependencies, recursively. It also has one entry, whose key is "", which +// represents "builtin" types. +func (p *Parser) NewUniverse() (types.Universe, error) { + u := types.Universe{} + + pkgs := []*packages.Package{} + for _, path := range p.UserRequestedPackages() { + pkgs = append(pkgs, p.goPkgs[path]) + } + if err := p.addPkgsToUniverse(pkgs, &u); err != nil { + return nil, err + } + + return u, nil +} + +// addCommentsToType takes any accumulated comment lines prior to obj and +// attaches them to the type t. +func (p *Parser) addCommentsToType(obj gotypes.Object, t *types.Type) { + t.CommentLines = p.docComment(obj.Pos()) + t.SecondClosestCommentLines = p.priorDetachedComment(obj.Pos()) +} + +// packageDir tries to figure out the directory of the specified package. 
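Once packages are loaded, NewUniverse turns them into gengo's own type model. A sketch of consuming it; the import path is invented, and the Types map on types.Package is assumed to be carried over from gengo v1 (only Path, Dir, and Name are visible in this diff).

package main

import (
	"fmt"

	"k8s.io/gengo/v2/parser"
)

func main() {
	p := parser.New()
	if err := p.LoadPackages("example.com/pkg/apis/widgets"); err != nil {
		panic(err)
	}

	u, err := p.NewUniverse()
	if err != nil {
		panic(err)
	}

	// The universe is keyed by import path; Package carries the parsed types.
	pkg := u.Package("example.com/pkg/apis/widgets")
	fmt.Println(pkg.Name, "found in", pkg.Dir)
	for name, t := range pkg.Types { // assumption: Types map as in gengo v1
		fmt.Println(name, t.Kind)
	}
}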
+func packageDir(pkg *packages.Package) (string, error) { + // Sometimes Module is present but has no Dir, e.g. when it is vendored. + if pkg.Module != nil && pkg.Module.Dir != "" { + // NOTE: this will not work if tests are loaded, because Go mutates the + // Package.PkgPath. + subdir := strings.TrimPrefix(pkg.PkgPath, pkg.Module.Path) + return filepath.Join(pkg.Module.Dir, subdir), nil + } + if len(pkg.GoFiles) > 0 { + return filepath.Dir(pkg.GoFiles[0]), nil + } + if len(pkg.IgnoredFiles) > 0 { + return filepath.Dir(pkg.IgnoredFiles[0]), nil + } + return "", fmt.Errorf("can't find package dir for %q - no module info and no Go files", pkg.PkgPath) +} + +// addPkgsToUniverse adds the packages, and all of their deps, recursively, to +// the universe and (if needed) searches through them for types. +func (p *Parser) addPkgsToUniverse(pkgs []*packages.Package, u *types.Universe) error { + addOne := func(pkg *packages.Package) error { + if err := p.addPkgToUniverse(pkg, u); err != nil { + return err + } + return nil + } + if err := forEachPackageRecursive(pkgs, addOne); err != nil { + return err + } + return nil +} + +// addPkgToUniverse adds one package to the universe and (if needed) searches +// through it for types. +func (p *Parser) addPkgToUniverse(pkg *packages.Package, u *types.Universe) error { + pkgPath := pkg.PkgPath + if p.fullyProcessed[pkgPath] { + return nil + } + + // This will get-or-create the Package. + gengoPkg := u.Package(pkgPath) + + if gengoPkg.Dir == "" { + // We're keeping this package, though we might not fully process it. + if vlog := klog.V(5); vlog.Enabled() { + why := "user-requested" + if !p.userRequested[pkgPath] { + why = "dependency" + } + vlog.Infof("addPkgToUniverse %q (%s)", pkgPath, why) + } + + absPath := "" + if dir, err := packageDir(pkg); err != nil { + return err + } else { + absPath = dir + } + + gengoPkg.Path = pkg.PkgPath + gengoPkg.Dir = absPath + } + + // If the package was not user-requested, we can stop here. + if !p.userRequested[pkgPath] { + return nil + } + + // Mark it as done, so we don't ever re-process it. + p.fullyProcessed[pkgPath] = true + gengoPkg.Name = pkg.Name + + // For historical reasons we treat files named "doc.go" specially. + // TODO: It would be nice to not do this and instead treat package + // doc-comments as the "global" config place. This would require changing + // most generators and input files. + for _, f := range pkg.Syntax { + // This gets the filename for the ast.File. Iterating pkg.GoFiles is + // documented as unreliable. + pos := p.fset.Position(f.FileStart) + if filepath.Base(pos.Filename) == "doc.go" { + gengoPkg.Comments = []string{} + for i := range f.Comments { + gengoPkg.Comments = append(gengoPkg.Comments, splitLines(f.Comments[i].Text())...) + } + if f.Doc != nil { + gengoPkg.DocComments = splitLines(f.Doc.Text()) + } + } + } + + // Walk all the types, recursively and save them for later access. + s := pkg.Types.Scope() + for _, n := range s.Names() { + switch obj := s.Lookup(n).(type) { + case *gotypes.TypeName: + t := p.walkType(*u, nil, obj.Type()) + p.addCommentsToType(obj, t) + case *gotypes.Func: + // We only care about functions, not concrete/abstract methods. 
+ if obj.Type() != nil && obj.Type().(*gotypes.Signature).Recv() == nil { + t := p.addFunction(*u, nil, obj) + p.addCommentsToType(obj, t) + } + case *gotypes.Var: + if !obj.IsField() { + t := p.addVariable(*u, nil, obj) + p.addCommentsToType(obj, t) + } + case *gotypes.Const: + t := p.addConstant(*u, nil, obj) + p.addCommentsToType(obj, t) + default: + klog.Infof("addPkgToUniverse %q: unhandled object of type %T: %v", pkgPath, obj, obj) + } + } + + // Add all of this package's imports. + importedPkgs := []string{} + for _, imp := range pkg.Imports { + if err := p.addPkgToUniverse(imp, u); err != nil { + return err + } + importedPkgs = append(importedPkgs, imp.PkgPath) + } + sort.Strings(importedPkgs) + u.AddImports(pkg.PkgPath, importedPkgs...) + + return nil +} + +// If the specified position has a "doc comment", return that. +func (p *Parser) docComment(pos token.Pos) []string { + // An object's doc comment always ends on the line before the object's own + // declaration. + c1 := p.priorCommentLines(pos, 1) + return splitLines(c1.Text()) // safe even if c1 is nil +} + +// If there is a detached (not immediately before a declaration) comment, +// return that. +func (p *Parser) priorDetachedComment(pos token.Pos) []string { + // An object's doc comment always ends on the line before the object's own + // declaration. + c1 := p.priorCommentLines(pos, 1) + + // Using a literal "2" here is brittle in theory (it means literally 2 + // lines), but in practice Go code is gofmt'ed (which elides repeated blank + // lines), so it works. + var c2 *ast.CommentGroup + if c1 == nil { + c2 = p.priorCommentLines(pos, 2) + } else { + c2 = p.priorCommentLines(c1.List[0].Slash, 2) + } + return splitLines(c2.Text()) // safe even if c1 is nil +} + +// If there's a comment block which ends nlines before pos, return it. +func (p *Parser) priorCommentLines(pos token.Pos, lines int) *ast.CommentGroup { + position := p.fset.Position(pos) + key := fileLine{position.Filename, position.Line - lines} + return p.endLineToCommentGroup[key] +} + +func splitLines(str string) []string { + return strings.Split(strings.TrimRight(str, "\n"), "\n") +} + +func goFuncNameToName(in string) types.Name { + name := strings.TrimPrefix(in, "func ") + nameParts := strings.Split(name, "(") + return goNameToName(nameParts[0]) +} + +func goVarNameToName(in string) types.Name { + nameParts := strings.Split(in, " ") + // nameParts[0] is "var". + // nameParts[2:] is the type of the variable, we ignore it for now. + return goNameToName(nameParts[1]) +} + +func goNameToName(in string) types.Name { + // Detect anonymous type names. (These may have '.' characters because + // embedded types may have packages, so we detect them specially.) + if strings.HasPrefix(in, "struct{") || + strings.HasPrefix(in, "<-chan") || + strings.HasPrefix(in, "chan<-") || + strings.HasPrefix(in, "chan ") || + strings.HasPrefix(in, "func(") || + strings.HasPrefix(in, "func (") || + strings.HasPrefix(in, "*") || + strings.HasPrefix(in, "map[") || + strings.HasPrefix(in, "[") { + return types.Name{Name: in} + } + + // Otherwise, if there are '.' characters present, the name has a + // package path in front. + nameParts := strings.Split(in, ".") + name := types.Name{Name: in} + if n := len(nameParts); n >= 2 { + // The final "." is the name of the type--previous ones must + // have been in the package path. 
+ name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1] + } + return name +} + +func (p *Parser) convertSignature(u types.Universe, t *gotypes.Signature) *types.Signature { + signature := &types.Signature{} + for i := 0; i < t.Params().Len(); i++ { + signature.Parameters = append(signature.Parameters, p.walkType(u, nil, t.Params().At(i).Type())) + signature.ParameterNames = append(signature.ParameterNames, t.Params().At(i).Name()) + } + for i := 0; i < t.Results().Len(); i++ { + signature.Results = append(signature.Results, p.walkType(u, nil, t.Results().At(i).Type())) + signature.ResultNames = append(signature.ResultNames, t.Results().At(i).Name()) + } + if r := t.Recv(); r != nil { + signature.Receiver = p.walkType(u, nil, r.Type()) + } + signature.Variadic = t.Variadic() + return signature +} + +// walkType adds the type, and any necessary child types. +func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type) *types.Type { + // Most of the cases are underlying types of the named type. + name := goNameToName(in.String()) + if useName != nil { + name = *useName + } + + switch t := in.(type) { + case *gotypes.Struct: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Struct + for i := 0; i < t.NumFields(); i++ { + f := t.Field(i) + m := types.Member{ + Name: f.Name(), + Embedded: f.Anonymous(), + Tags: t.Tag(i), + Type: p.walkType(u, nil, f.Type()), + CommentLines: p.docComment(f.Pos()), + } + out.Members = append(out.Members, m) + } + return out + case *gotypes.Map: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Map + out.Elem = p.walkType(u, nil, t.Elem()) + out.Key = p.walkType(u, nil, t.Key()) + return out + case *gotypes.Pointer: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Pointer + out.Elem = p.walkType(u, nil, t.Elem()) + return out + case *gotypes.Slice: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Slice + out.Elem = p.walkType(u, nil, t.Elem()) + return out + case *gotypes.Array: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Array + out.Elem = p.walkType(u, nil, t.Elem()) + out.Len = in.(*gotypes.Array).Len() + return out + case *gotypes.Chan: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Chan + out.Elem = p.walkType(u, nil, t.Elem()) + // TODO: need to store direction, otherwise raw type name + // cannot be properly written. + return out + case *gotypes.Basic: + out := u.Type(types.Name{ + Package: "", // This is a magic package name in the Universe. 
+ Name: t.Name(), + }) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Unsupported + return out + case *gotypes.Signature: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Func + out.Signature = p.convertSignature(u, t) + return out + case *gotypes.Interface: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Interface + t.Complete() + for i := 0; i < t.NumMethods(); i++ { + if out.Methods == nil { + out.Methods = map[string]*types.Type{} + } + method := t.Method(i) + name := goNameToName(method.String()) + mt := p.walkType(u, &name, method.Type()) + mt.CommentLines = p.docComment(method.Pos()) + out.Methods[method.Name()] = mt + } + return out + case *gotypes.Named: + var out *types.Type + switch t.Underlying().(type) { + case *gotypes.Named, *gotypes.Basic, *gotypes.Map, *gotypes.Slice: + name := goNameToName(t.String()) + out = u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Alias + out.Underlying = p.walkType(u, nil, t.Underlying()) + default: + // gotypes package makes everything "named" with an + // underlying anonymous type--we remove that annoying + // "feature" for users. This flattens those types + // together. + name := goNameToName(t.String()) + if out := u.Type(name); out.Kind != types.Unknown { + return out // short circuit if we've already made this. + } + out = p.walkType(u, &name, t.Underlying()) + } + // If the underlying type didn't already add methods, add them. + // (Interface types will have already added methods.) + if len(out.Methods) == 0 { + for i := 0; i < t.NumMethods(); i++ { + if out.Methods == nil { + out.Methods = map[string]*types.Type{} + } + method := t.Method(i) + name := goNameToName(method.String()) + mt := p.walkType(u, &name, method.Type()) + mt.CommentLines = p.docComment(method.Pos()) + out.Methods[method.Name()] = mt + } + } + return out + default: + out := u.Type(name) + if out.Kind != types.Unknown { + return out + } + out.Kind = types.Unsupported + klog.Warningf("Making unsupported type entry %q for: %#v\n", out, t) + return out + } +} + +func (p *Parser) addFunction(u types.Universe, useName *types.Name, in *gotypes.Func) *types.Type { + name := goFuncNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Function(name) + out.Kind = types.DeclarationOf + out.Underlying = p.walkType(u, nil, in.Type()) + return out +} + +func (p *Parser) addVariable(u types.Universe, useName *types.Name, in *gotypes.Var) *types.Type { + name := goVarNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Variable(name) + out.Kind = types.DeclarationOf + out.Underlying = p.walkType(u, nil, in.Type()) + return out +} + +func (p *Parser) addConstant(u types.Universe, useName *types.Name, in *gotypes.Const) *types.Type { + name := goVarNameToName(in.String()) + if useName != nil { + name = *useName + } + out := u.Constant(name) + out.Kind = types.DeclarationOf + out.Underlying = p.walkType(u, nil, in.Type()) + + var constval string + + // For strings, we use `StringVal()` to get the un-truncated, + // un-quoted string. For other values, `.String()` is preferable to + // get something relatively human readable (especially since for + // floating point types, `ExactString()` will generate numeric + // expressions using `big.(*Float).Text()`. 
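From a generator author's point of view, the practical outcome of walkType is that struct fields arrive as types.Member values with the raw struct tag string attached. A sketch of consuming them; reflect.StructTag is just the standard library's tag parser, and the json key is only an example.

package generators

import (
	"fmt"
	"io"
	"reflect"

	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/types"
)

type fieldDocGenerator struct {
	generator.GoGenerator
}

func (g fieldDocGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
	if t.Kind != types.Struct {
		return nil
	}
	for _, m := range t.Members {
		// m.Tags holds the raw backtick contents, so the stdlib parser applies.
		jsonName := reflect.StructTag(m.Tags).Get("json")
		fmt.Fprintf(w, "// field %s (json:%q) has type %s\n", m.Name, jsonName, m.Type.String())
	}
	return nil
}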
+ switch in.Val().Kind() { + case constant.String: + constval = constant.StringVal(in.Val()) + default: + constval = in.Val().String() + } + + out.ConstValue = &constval + return out +} diff --git a/vendor/k8s.io/gengo/types/doc.go b/vendor/k8s.io/gengo/v2/types/doc.go similarity index 93% rename from vendor/k8s.io/gengo/types/doc.go rename to vendor/k8s.io/gengo/v2/types/doc.go index 74a969a7..23acb879 100644 --- a/vendor/k8s.io/gengo/types/doc.go +++ b/vendor/k8s.io/gengo/v2/types/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package types contains go type information, packaged in a way that makes // auto-generation convenient, whether by template or straight go functions. -package types // import "k8s.io/gengo/types" +package types // import "k8s.io/gengo/v2/types" diff --git a/vendor/k8s.io/gengo/types/types.go b/vendor/k8s.io/gengo/v2/types/types.go similarity index 98% rename from vendor/k8s.io/gengo/types/types.go rename to vendor/k8s.io/gengo/v2/types/types.go index 77650255..e9c8319c 100644 --- a/vendor/k8s.io/gengo/types/types.go +++ b/vendor/k8s.io/gengo/v2/types/types.go @@ -108,14 +108,13 @@ const ( // templates (for example). But it is strongly encouraged for code to build by // using the provided functions. type Package struct { - // Canonical name of this package-- its path. + // Canonical import-path of this package. Path string - // The location this package was loaded from - SourcePath string + // The location (on disk) of this package. + Dir string - // Short name of this package; the name that appears in the - // 'package x' line. + // Short name of this package, as in the 'package x' line. Name string // The comment right above the package declaration in doc.go, if any. @@ -359,6 +358,9 @@ type Type struct { // String returns the name of the type. func (t *Type) String() string { + if t == nil { + return "" // makes tests easier + } return t.Name.String() } diff --git a/vendor/k8s.io/klog/v2/.golangci.yaml b/vendor/k8s.io/klog/v2/.golangci.yaml new file mode 100644 index 00000000..0d77d65f --- /dev/null +++ b/vendor/k8s.io/klog/v2/.golangci.yaml @@ -0,0 +1,6 @@ +linters: + disable-all: true + enable: # sorted alphabetical + - gofmt + - misspell + - revive diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index a2fe8f35..7500475a 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,14 +1,16 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: - harshanarayana + - mengjiao-liu - pohly approvers: - dims + - pohly - thockin - - serathius emeritus_approvers: - brancz - justinsb - lavalamp - piosz + - serathius - tallclair diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go b/vendor/k8s.io/klog/v2/contextual_slog.go similarity index 59% rename from vendor/k8s.io/gengo/examples/set-gen/sets/empty.go rename to vendor/k8s.io/klog/v2/contextual_slog.go index e11e622c..d3b56252 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/empty.go +++ b/vendor/k8s.io/klog/v2/contextual_slog.go @@ -1,5 +1,8 @@ +//go:build go1.21 +// +build go1.21 + /* -Copyright The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +17,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by set-gen. DO NOT EDIT. 
+package klog + +import ( + "log/slog" -package sets + "github.com/go-logr/logr" +) -// Empty is public since it is used by some internal API objects for conversions between external -// string arrays and internal sets, and conversion logic requires public types today. -type Empty struct{} +// SetSlogLogger reconfigures klog to log through the slog logger. The logger must not be nil. +func SetSlogLogger(logger *slog.Logger) { + SetLoggerWithOptions(logr.FromSlogHandler(logger.Handler()), ContextualLogger(true)) +} diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 00000000..63995ca6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +// Format wraps a value of an arbitrary type and implement fmt.Stringer and +// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog +// returns the original value with a type that has no special methods, in +// particular no MarshalLog or MarshalJSON. +// +// Wrapping values like that is useful when the value has a broken +// implementation of these special functions (for example, a type which +// inherits String from TypeMeta, but then doesn't re-implement String) or the +// implementation produces output that is less readable or unstructured (for +// example, the generated String functions for Kubernetes API types). +func Format(obj interface{}) interface{} { + return formatAny{Object: obj} +} + +type formatAny struct { + Object interface{} +} + +func (f formatAny) String() string { + var buffer strings.Builder + encoder := json.NewEncoder(&buffer) + encoder.SetIndent("", " ") + if err := encoder.Encode(&f.Object); err != nil { + return fmt.Sprintf("error marshaling %T to JSON: %v", f, err) + } + return buffer.String() +} + +func (f formatAny) MarshalLog() interface{} { + // Returning a pointer to a pointer ensures that zapr doesn't find a + // fmt.Stringer or logr.Marshaler when it checks the type of the + // value. It then falls back to reflection, which dumps the value being + // pointed to (JSON doesn't have pointers). + ptr := &f.Object + return &ptr +} + +var _ fmt.Stringer = formatAny{} +var _ logr.Marshaler = formatAny{} diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index f325ded5..46de00fb 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -30,14 +30,16 @@ import ( var ( // Pid is inserted into log headers. Can be overridden for tests. Pid = os.Getpid() + + // Time, if set, will be used instead of the actual current time. + Time *time.Time ) // Buffer holds a single byte.Buffer for reuse. The zero value is ready for // use. It also provides some helper methods for output formatting. 
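Both klog additions in this hunk are user-facing. SetSlogLogger routes klog output through a log/slog handler (Go 1.21+), and Format wraps a value so it is logged as indented JSON regardless of any String method it may have. A sketch; the config struct is made up.

package main

import (
	"log/slog"
	"os"

	"k8s.io/klog/v2"
)

type probeConfig struct {
	Target  string `json:"target"`
	Retries int    `json:"retries"`
}

func main() {
	// Send everything klog produces to slog's JSON handler.
	klog.SetSlogLogger(slog.New(slog.NewJSONHandler(os.Stderr, nil)))

	cfg := probeConfig{Target: "10.0.0.1:80", Retries: 3}

	// klog.Format forces pretty-printed JSON for the value.
	klog.InfoS("starting probe", "config", klog.Format(cfg))
}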
type Buffer struct { bytes.Buffer - Tmp [64]byte // temporary byte array for creating headers. - next *Buffer + Tmp [64]byte // temporary byte array for creating headers. } var buffers = sync.Pool{ @@ -122,6 +124,9 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] @@ -157,6 +162,9 @@ func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go index b8b6af5c..cc11bb48 100644 --- a/vendor/k8s.io/klog/v2/internal/clock/clock.go +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -39,16 +39,6 @@ type Clock interface { // Sleep sleeps for the provided duration d. // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. Sleep(d time.Duration) - // Tick returns the channel of a new Ticker. - // This method does not allow to free/GC the backing ticker. Use - // NewTicker from WithTicker instead. - Tick(d time.Duration) <-chan time.Time -} - -// WithTicker allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type WithTicker interface { - Clock // NewTicker returns a new Ticker. NewTicker(time.Duration) Ticker } @@ -66,7 +56,7 @@ type WithDelayedExecution interface { // WithTickerAndDelayedExecution allows for injecting fake or real clocks // into code that needs Ticker and AfterFunc functionality type WithTickerAndDelayedExecution interface { - WithTicker + Clock // AfterFunc executes f in its own goroutine after waiting // for d duration and returns a Timer whose channel can be // closed by calling Stop() on the Timer. @@ -79,7 +69,7 @@ type Ticker interface { Stop() } -var _ = WithTicker(RealClock{}) +var _ Clock = RealClock{} // RealClock really calls time.Now() type RealClock struct{} @@ -115,13 +105,6 @@ func (RealClock) AfterFunc(d time.Duration, f func()) Timer { } } -// Tick is the same as time.Tick(d) -// This method does not allow to free/GC the backing ticker. Use -// NewTicker instead. -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) -} - // NewTicker returns a new Ticker. func (RealClock) NewTicker(d time.Duration) Ticker { return &realTicker{ diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index 1dc81a15..d1a4751c 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -18,6 +18,7 @@ package serialize import ( "bytes" + "encoding/json" "fmt" "strconv" @@ -171,83 +172,33 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { Formatter{}.KVListFormat(b, keysAndValues...) } -// KVFormat serializes one key/value pair into the provided buffer. -// A space gets inserted before the pair. 
-func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } - - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case textWriter: - writeTextWriterValue(b, v) - case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) - case string: - writeStringValue(b, true, v) - case error: - writeStringValue(b, true, ErrorToString(v)) - case logr.Marshaler: - value := MarshalerToValue(v) - // A marshaler that returns a string is useful for - // delayed formatting of complex values. We treat this - // case like a normal string. This is useful for - // multi-line support. - // - // We could do this by recursively formatting a value, - // but that comes with the risk of infinite recursion - // if a marshaler returns itself. Instead we call it - // only once and rely on it returning the intended - // value directly. - switch value := value.(type) { - case string: - writeStringValue(b, true, value) - default: - writeStringValue(b, false, f.AnyToString(value)) - } - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. - b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) - default: - writeStringValue(b, false, f.AnyToString(v)) - } -} - func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } -// AnyToString is the historic fallback formatter. -func (f Formatter) AnyToString(v interface{}) string { +// formatAny is the fallback formatter for a value. It supports a hook (for +// example, for YAML encoding) and itself uses JSON encoding. +func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { + b.WriteRune('=') if f.AnyToStringHook != nil { - return f.AnyToStringHook(v) + b.WriteString(f.AnyToStringHook(v)) + return + } + formatAsJSON(b, v) +} + +func formatAsJSON(b *bytes.Buffer, v interface{}) { + encoder := json.NewEncoder(b) + l := b.Len() + if err := encoder.Encode(v); err != nil { + // This shouldn't happen. We discard whatever the encoder + // wrote and instead dump an error string. + b.Truncate(l) + b.WriteString(fmt.Sprintf(`""`, err)) + return } - return fmt.Sprintf("%+v", v) + // Remove trailing newline. 
+ b.Truncate(b.Len() - 1) } // StringerToString converts a Stringer to a string, @@ -287,7 +238,7 @@ func ErrorToString(err error) (ret string) { } func writeTextWriterValue(b *bytes.Buffer, v textWriter) { - b.WriteRune('=') + b.WriteByte('=') defer func() { if err := recover(); err != nil { fmt.Fprintf(b, `""`, err) @@ -296,18 +247,13 @@ func writeTextWriterValue(b *bytes.Buffer, v textWriter) { v.WriteText(b) } -func writeStringValue(b *bytes.Buffer, quote bool, v string) { +func writeStringValue(b *bytes.Buffer, v string) { data := []byte(v) index := bytes.IndexByte(data, '\n') if index == -1 { b.WriteByte('=') - if quote { - // Simple string, quote quotation marks and non-printable characters. - b.WriteString(strconv.Quote(v)) - return - } - // Non-string with no line breaks. - b.WriteString(v) + // Simple string, quote quotation marks and non-printable characters. + b.WriteString(strconv.Quote(v)) return } diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go new file mode 100644 index 00000000..d9c7d154 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -0,0 +1,97 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. 
+ // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go new file mode 100644 index 00000000..89acf977 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go @@ -0,0 +1,155 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "log/slog" + "strconv" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + // + // slog.LogValuer does not need to be handled here because the handler will + // already have resolved such special values to the final value for logging. + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case slog.Value: + // This must come before fmt.Stringer because slog.Value implements + // fmt.Stringer, but does not produce the output that we want. 
+ b.WriteByte('=') + generateJSON(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case slog.LogValuer: + value := slog.AnyValue(v).Resolve() + if value.Kind() == slog.KindString { + writeStringValue(b, value.String()) + } else { + b.WriteByte('=') + generateJSON(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} + +// generateJSON has the same preference for plain strings as KVFormat. +// In contrast to KVFormat it always produces valid JSON with no line breaks. +func generateJSON(b *bytes.Buffer, v interface{}) { + switch v := v.(type) { + case slog.Value: + switch v.Kind() { + case slog.KindGroup: + // Format as a JSON group. We must not involve f.AnyToStringHook (if there is any), + // because there is no guarantee that it produces valid JSON. + b.WriteByte('{') + for i, attr := range v.Group() { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(strconv.Quote(attr.Key)) + b.WriteByte(':') + generateJSON(b, attr.Value) + } + b.WriteByte('}') + case slog.KindLogValuer: + generateJSON(b, v.Resolve()) + default: + // Peel off the slog.Value wrapper and format the actual value. + generateJSON(b, v.Any()) + } + case fmt.Stringer: + b.WriteString(strconv.Quote(StringerToString(v))) + case logr.Marshaler: + generateJSON(b, MarshalerToValue(v)) + case slog.LogValuer: + generateJSON(b, slog.AnyValue(v).Resolve().Any()) + case string: + b.WriteString(strconv.Quote(v)) + case error: + b.WriteString(strconv.Quote(v.Error())) + default: + formatAsJSON(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go new file mode 100644 index 00000000..21f1697d --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
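An aside on the slog-aware KVFormat above: values passed to klog.InfoS that implement slog.LogValuer (or are already a slog.Value) are resolved and emitted as JSON via generateJSON, while values with no String/MarshalLog/LogValue method fall back to formatAsJSON. A rough sketch, not taken from the vendored sources; the user type and the output quoted in comments are assumptions.

package main

import (
	"log/slog"

	"k8s.io/klog/v2"
)

// user implements slog.LogValuer; the slog-aware KVFormat resolves it and
// renders the group as JSON, e.g. user={"name":"jane","admin":false}.
type user struct {
	name  string
	admin bool
}

func (u user) LogValue() slog.Value {
	return slog.GroupValue(
		slog.String("name", u.name),
		slog.Bool("admin", u.admin),
	)
}

func main() {
	klog.InfoS("login", "user", user{name: "jane"})

	// A value with no special methods goes through formatAsJSON and is
	// printed as single-line JSON, e.g. quota={"pods":10}.
	klog.InfoS("limits", "quota", map[string]int{"pods": 10})
}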
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sloghandler + +import ( + "context" + "log/slog" + "runtime" + "strings" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error { + now := record.Time + if now.IsZero() { + // This format doesn't support printing entries without a time. + now = time.Now() + } + + // slog has numeric severity levels, with 0 as default "info", negative for debugging, and + // positive with some pre-defined levels for more important. Those ranges get mapped to + // the corresponding klog levels where possible, with "info" the default that is used + // also for negative debug levels. + level := record.Level + s := severity.InfoLog + switch { + case level >= slog.LevelError: + s = severity.ErrorLog + case level >= slog.LevelWarn: + s = severity.WarningLog + } + + var file string + var line int + if record.PC != 0 { + // Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70 + fs := runtime.CallersFrames([]uintptr{record.PC}) + f, _ := fs.Next() + if f.File != "" { + file = f.File + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + line = f.Line + } + } else { + file = "???" + line = 1 + } + + kvList := make([]interface{}, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = appendAttr(groups, kvList, attr) + return true + }) + + printWithInfos(file, line, now, nil, s, record.Message, kvList) + return nil +} + +func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} { + kvList := make([]interface{}, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = appendAttr(groups, kvList, attr) + } + return kvList +} + +func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} { + var key string + if groups != "" { + key = groups + "." + attr.Key + } else { + key = attr.Key + } + return append(kvList, key, attr.Value) +} diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index ecd3f8b6..786af74b 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -178,14 +178,14 @@ func (ks kobjSlice) process() (objs []interface{}, err string) { return objectRefs, "" } -var nilToken = []byte("") +var nilToken = []byte("null") func (ks kobjSlice) WriteText(out *bytes.Buffer) { s := reflect.ValueOf(ks.arg) switch s.Kind() { case reflect.Invalid: - // nil parameter, print as empty slice. - out.WriteString("[]") + // nil parameter, print as null. + out.Write(nilToken) return case reflect.Slice: // Okay, handle below. 
@@ -197,15 +197,15 @@ func (ks kobjSlice) WriteText(out *bytes.Buffer) { defer out.Write([]byte{']'}) for i := 0; i < s.Len(); i++ { if i > 0 { - out.Write([]byte{' '}) + out.Write([]byte{','}) } item := s.Index(i).Interface() if item == nil { out.Write(nilToken) } else if v, ok := item.(KMetadata); ok { - KObj(v).writeUnquoted(out) + KObj(v).WriteText(out) } else { - fmt.Fprintf(out, "", item) + fmt.Fprintf(out, `""`, item) return } } diff --git a/vendor/k8s.io/klog/v2/k8s_references_slog.go b/vendor/k8s.io/klog/v2/k8s_references_slog.go new file mode 100644 index 00000000..5522c84c --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references_slog.go @@ -0,0 +1,39 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "log/slog" +) + +func (ref ObjectRef) LogValue() slog.Value { + if ref.Namespace != "" { + return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace)) + } + return slog.GroupValue(slog.String("name", ref.Name)) +} + +var _ slog.LogValuer = ObjectRef{} + +func (ks kobjSlice) LogValue() slog.Value { + return slog.AnyValue(ks.MarshalLog()) +} + +var _ slog.LogValuer = kobjSlice{} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 466eeaf2..47ec9466 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -14,9 +14,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// Package klog contains the following functionality: +// +// - output routing as defined via command line flags ([InitFlags]) +// - log formatting as text, either with a single, unstructured string ([Info], [Infof], etc.) +// or as a structured log entry with message and key/value pairs ([InfoS], etc.) +// - management of a go-logr [Logger] ([SetLogger], [Background], [TODO]) +// - helper functions for logging values ([Format]) and managing the state of klog ([CaptureState], [State.Restore]) +// - wrappers for [logr] APIs for contextual logging where the wrappers can +// be turned into no-ops ([EnableContextualLogging], [NewContext], [FromContext], +// [LoggerWithValues], [LoggerWithName]); if the ability to turn off +// contextual logging is not needed, then go-logr can also be used directly +// - type aliases for go-logr types to simplify imports in code which uses both (e.g. 
[Logger]) +// - [k8s.io/klog/v2/textlogger]: a logger which uses the same formatting as klog log with +// simpler output routing; beware that it comes with its own command line flags +// and does not use the ones from klog +// - [k8s.io/klog/v2/ktesting]: per-test output in Go unit tests +// - [k8s.io/klog/v2/klogr]: a deprecated, standalone [logr.Logger] on top of the main klog package; +// use [Background] instead if klog output routing is needed, [k8s.io/klog/v2/textlogger] if not +// - [k8s.io/klog/v2/examples]: demos of this functionality +// - [k8s.io/klog/v2/test]: reusable tests for [logr.Logger] implementations // // Basic examples: // @@ -387,13 +404,6 @@ func (t *traceLocation) Set(value string) error { return nil } -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - var logging loggingT var commandLine flag.FlagSet @@ -415,7 +425,7 @@ func init() { logging.stderrThreshold = severityValue{ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. } - commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)") commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -469,7 +479,7 @@ type settings struct { // Access to all of the following fields must be protected via a mutex. // file holds writer for each of the log types. - file [severity.NumSeverity]flushSyncWriter + file [severity.NumSeverity]io.Writer // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. flushInterval time.Duration @@ -518,9 +528,7 @@ type settings struct { func (s settings) deepCopy() settings { // vmodule is a slice and would be shared, so we have copy it. filter := make([]modulePat, len(s.vmodule.filter)) - for i := range s.vmodule.filter { - filter[i] = s.vmodule.filter[i] - } + copy(filter, s.vmodule.filter) s.vmodule.filter = filter if s.logger != nil { @@ -657,16 +665,15 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin } } } - return l.formatHeader(s, file, line), file, line + return l.formatHeader(s, file, line, timeNow()), file, line } // formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { +func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer { buf := buffer.GetBuffer() if l.skipHeaders { return buf } - now := timeNow() buf.FormatHeader(s, file, line, now) return buf } @@ -676,6 +683,10 @@ func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFil } func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprintln(args...) 
// cause vet to treat this function like fmt.Println + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -696,7 +707,15 @@ func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilte } func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprint(args...) // // cause vet to treat this function like fmt.Print + } + buf, file, line := l.header(s, depth) + l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...) +} + +func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -719,6 +738,10 @@ func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilt } func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { + if false { + _ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -741,7 +764,7 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter Lo // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) + buf := l.formatHeader(s, file, line, timeNow()) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -759,7 +782,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, fil l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } -// if loggr is specified, will call loggr.Error, otherwise output with logging module. +// if logger is specified, will call logger.Error, otherwise output with logging module. func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -771,7 +794,7 @@ func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } -// if loggr is specified, will call loggr.Info, otherwise output with logging module. +// if logger is specified, will call logger.Info, otherwise output with logging module. func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -783,7 +806,7 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } -// printS is called from infoS and errorS if loggr is not specified. 
+// printS is called from infoS and errorS if logger is not specified. // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. @@ -796,37 +819,17 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) + l.printDepth(s, nil, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. buffer.PutBuffer(b) } -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() for s := severity.FatalLog; s >= severity.InfoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb + logging.file[s] = w } } @@ -838,10 +841,7 @@ func SetOutputBySeverity(name string, w io.Writer) { if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb + logging.file[sev] = w } // LogToStderr sets whether to log exclusively to stderr, bypassing outputs @@ -873,6 +873,9 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu if logger.writeKlogBuffer != nil { logger.writeKlogBuffer(data) } else { + if len(data) > 0 && data[len(data)-1] == '\n' { + data = data[:len(data)-1] + } // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { @@ -897,7 +900,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu l.exit(err) } } - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -907,20 +910,20 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu } if l.oneOutput { - l.file[s].Write(data) + _, _ = l.file[s].Write(data) } else { switch s { case severity.FatalLog: - l.file[severity.FatalLog].Write(data) + _, _ = l.file[severity.FatalLog].Write(data) fallthrough case severity.ErrorLog: - l.file[severity.ErrorLog].Write(data) + _, _ = l.file[severity.ErrorLog].Write(data) fallthrough case severity.WarningLog: - l.file[severity.WarningLog].Write(data) + _, _ = l.file[severity.WarningLog].Write(data) fallthrough case severity.InfoLog: - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } } } @@ -946,7 +949,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. 
- f.Write(trace) + _, _ = f.Write(trace) } } l.mu.Unlock() @@ -978,7 +981,8 @@ func (l *loggingT) exit(err error) { logExitFunc(err) return } - l.flushAll() + needToSync := l.flushAll() + l.syncAll(needToSync) OsExit(2) } @@ -995,10 +999,6 @@ type syncBuffer struct { maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - // CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. func CalculateMaxSize() uint64 { if logging.logFile != "" { @@ -1102,7 +1102,7 @@ const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. type flushDaemon struct { mu sync.Mutex - clock clock.WithTicker + clock clock.Clock flush func() stopC chan struct{} stopDone chan struct{} @@ -1110,7 +1110,7 @@ type flushDaemon struct { // newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a // clock.RealClock is used. -func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { +func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon { if tickClock == nil { tickClock = clock.RealClock{} } @@ -1190,24 +1190,45 @@ func StartFlushDaemon(interval time.Duration) { // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() - l.flushAll() + needToSync := l.flushAll() l.mu.Unlock() + // Some environments are slow when syncing and holding the lock might cause contention. + l.syncAll(needToSync) } -// flushAll flushes all the logs and attempts to "sync" their data to disk. +// flushAll flushes all the logs // l.mu is held. -func (l *loggingT) flushAll() { +// +// The result is the number of files which need to be synced and the pointers to them. +func (l *loggingT) flushAll() fileArray { + var needToSync fileArray + // Flush from fatal down, in case there's trouble flushing. for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error + if sb, ok := file.(*syncBuffer); ok && sb.file != nil { + _ = sb.Flush() // ignore error + needToSync.files[needToSync.num] = sb.file + needToSync.num++ } } if logging.loggerOptions.flush != nil { logging.loggerOptions.flush() } + return needToSync +} + +type fileArray struct { + num int + files [severity.NumSeverity]*os.File +} + +// syncAll attempts to "sync" their data to disk. +func (l *loggingT) syncAll(needToSync fileArray) { + // Flush from fatal down, in case there's trouble flushing. + for i := 0; i < needToSync.num; i++ { + _ = needToSync.files[i].Sync() // ignore error + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's @@ -1228,6 +1249,19 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. 
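Worth noting from the klog.go hunks above: SetOutput and SetOutputBySeverity now store the io.Writer directly (the redirectBuffer wrapper is gone), and NewStandardLogger bridges the standard library logger into klog. A rough usage sketch, not part of the vendored sources:

package main

import (
	"bytes"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	// SetOutput keeps the io.Writer as-is, so a plain bytes.Buffer can
	// capture everything klog writes.
	var buf bytes.Buffer
	klog.LogToStderr(false)
	klog.SetOutput(&buf)

	// NewStandardLogger returns a *log.Logger whose output is routed to the
	// klog logs for the named severity ("INFO", "WARNING", "ERROR", "FATAL").
	stdLogger := klog.NewStandardLogger("INFO")
	stdLogger.Println("written via the stdlib log API")

	klog.Flush()
	fmt.Print(buf.String())
}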
type logBridge severity.Severity @@ -1268,9 +1302,7 @@ func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } + file = strings.TrimSuffix(file, ".go") if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index 1025d644..8bee1620 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -109,8 +109,8 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string, f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err + _ = os.Remove(symlink) // ignore err + _ = os.Symlink(name, symlink) // ignore err return f, fname, nil } lastErr = err diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 15de00e2..efec96fd 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -22,6 +22,11 @@ import ( "k8s.io/klog/v2/internal/serialize" ) +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + // NewKlogr returns a logger that is functionally identical to // klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The // difference is that it uses a simpler implementation. @@ -32,10 +37,15 @@ func NewKlogr() Logger { // klogger is a subset of klogr/klogr.go. It had to be copied to break an // import cycle (klogr wants to use klog, and klog wants to use klogr). type klogger struct { - level int callDepth int - prefix string - values []interface{} + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string } func (l *klogger) Init(info logr.RuntimeInfo) { @@ -44,34 +54,40 @@ func (l *klogger) Init(info logr.RuntimeInfo) { func (l *klogger) Info(level int, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } // Skip this function. VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } func (l *klogger) Enabled(level int) bool { - // Skip this function and logr.Logger.Info where Enabled is called. - return VDepth(l.callDepth+2, Level(level)).Enabled() + return VDepth(l.callDepth+1, Level(level)).Enabled() } func (l *klogger) Error(err error, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr -// uses '/' characters to separate name elements. Callers should not pass '/' +// uses '.' characters to separate name elements. Callers should not pass '.' // in the provided name string, but this library does not actually enforce that. func (l klogger) WithName(name string) logr.LogSink { - if len(l.prefix) > 0 { - l.prefix = l.prefix + "/" + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. 
+ v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + l.values = v + } else { + // Preprend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + l.values = v + l.hasPrefix = true } - l.prefix += name return &l } diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go new file mode 100644 index 00000000..c77d7baa --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + "log/slog" + "strconv" + "time" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *klogger) Handle(ctx context.Context, record slog.Record) error { + if logging.logger != nil { + if slogSink, ok := logging.logger.GetSink().(logr.SlogSink); ok { + // Let that logger do the work. + return slogSink.Handle(ctx, record) + } + } + + return sloghandler.Handle(ctx, record, l.groups, slogOutput) +} + +// slogOutput corresponds to several different functions in klog.go. +// It goes through some of the same checks and formatting steps before +// it ultimately converges by calling logging.printWithInfos. +func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // See infoS. + if logging.logger != nil { + // Taking this path happens when klog has a logger installed + // as backend which doesn't support slog. Not good, we have to + // guess about the call depth and drop the actual location. + logger := logging.logger.WithCallDepth(2) + if s > severity.ErrorLog { + logger.Error(err, msg, kvList...) + } else { + logger.Info(msg, kvList...) + } + return + } + + // See printS. + b := buffer.GetBuffer() + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, kvList...) + + // See print + header. + buf := logging.formatHeader(s, file, line, now) + logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer) + + buffer.PutBuffer(b) +} + +func (l *klogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *klogger) WithGroup(name string) logr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." 
+ name + } else { + clone.groups = name + } + return &clone +} + +var _ logr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/klog/v2/safeptr.go b/vendor/k8s.io/klog/v2/safeptr.go new file mode 100644 index 00000000..bbe24c2e --- /dev/null +++ b/vendor/k8s.io/klog/v2/safeptr.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +// SafePtr is a function that takes a pointer of any type (T) as an argument. +// If the provided pointer is not nil, it returns the same pointer. If it is nil, it returns nil instead. +// +// This function is particularly useful to prevent nil pointer dereferencing when: +// +// - The type implements interfaces that are called by the logger, such as `fmt.Stringer`. +// - And these interface implementations do not perform nil checks themselves. +func SafePtr[T any](p *T) any { + if p == nil { + return nil + } + return p +} diff --git a/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go index 19783370..153784ed 100644 --- a/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go +++ b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go @@ -18,59 +18,61 @@ package args import ( "fmt" - "path/filepath" "github.com/spf13/pflag" - "k8s.io/gengo/args" ) -// CustomArgs is used by the gengo framework to pass args specific to this generator. -type CustomArgs struct { - // ReportFilename is added to CustomArgs for specifying name of report file used +type Args struct { + OutputDir string // must be a directory path + OutputPkg string // must be a Go import-path + OutputFile string + + GoHeaderFile string + + // ReportFilename is added to Args for specifying name of report file used // by API linter. If specified, API rule violations will be printed to report file. // Otherwise default value "-" will be used which indicates stdout. ReportFilename string } -// NewDefaults returns default arguments for the generator. Returning the arguments instead +// New returns default arguments for the generator. Returning the arguments instead // of using default flag parsing allows registering custom arguments afterwards -func NewDefaults() (*args.GeneratorArgs, *CustomArgs) { - // Default() sets a couple of flag default values for example the boilerplate. 
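Returning to the new klog SafePtr helper introduced a few hunks above: it exists so that a typed nil pointer is logged as a nil value instead of exercising the String/MarshalLog panic-recovery path. A small illustrative sketch with a made-up stringer type, not part of the vendored sources:

package main

import "k8s.io/klog/v2"

// stringer is a made-up type whose String method panics on a nil receiver.
type stringer struct{ name string }

func (s *stringer) String() string { return s.name }

func main() {
	var s *stringer // deliberately nil

	// SafePtr returns nil for a nil pointer, so the serializer logs obj=null
	// instead of calling String() on the nil receiver and recovering from
	// the resulting panic.
	klog.InfoS("checked object", "obj", klog.SafePtr(s))
}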
- // WithoutDefaultFlagParsing() disables implicit addition of command line flags and parsing, - // which allows registering custom arguments afterwards - genericArgs := args.Default().WithoutDefaultFlagParsing() - genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kube-openapi/boilerplate/boilerplate.go.txt") - - customArgs := &CustomArgs{} - genericArgs.CustomArgs = customArgs +func New() *Args { + args := &Args{} // Default value for report filename is "-", which stands for stdout - customArgs.ReportFilename = "-" - // Default value for output file base name - genericArgs.OutputFileBaseName = "openapi_generated" + args.ReportFilename = "-" - return genericArgs, customArgs + return args } // AddFlags add the generator flags to the flag set. -func (c *CustomArgs) AddFlags(fs *pflag.FlagSet) { - fs.StringVarP(&c.ReportFilename, "report-filename", "r", c.ReportFilename, "Name of report file used by API linter to print API violations. Default \"-\" stands for standard output. NOTE that if valid filename other than \"-\" is specified, API linter won't return error on detected API violations. This allows further check of existing API violations without stopping the OpenAPI generation toolchain.") +func (args *Args) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&args.OutputDir, "output-dir", "", + "the base directory under which to generate results") + fs.StringVar(&args.OutputPkg, "output-pkg", "", + "the base Go import-path under which to generate results") + fs.StringVar(&args.OutputFile, "output-file", "generated.openapi.go", + "the name of the file to be generated") + fs.StringVar(&args.GoHeaderFile, "go-header-file", "", + "the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year") + fs.StringVarP(&args.ReportFilename, "report-filename", "r", args.ReportFilename, + "Name of report file used by API linter to print API violations. Default \"-\" stands for standard output. NOTE that if valid filename other than \"-\" is specified, API linter won't return error on detected API violations. This allows further check of existing API violations without stopping the OpenAPI generation toolchain.") } // Validate checks the given arguments. -func Validate(genericArgs *args.GeneratorArgs) error { - c, ok := genericArgs.CustomArgs.(*CustomArgs) - if !ok { - return fmt.Errorf("input arguments don't contain valid custom arguments") +func (args *Args) Validate() error { + if len(args.OutputDir) == 0 { + return fmt.Errorf("--output-dir must be specified") } - if len(c.ReportFilename) == 0 { - return fmt.Errorf("report filename cannot be empty. 
specify a valid filename or use \"-\" for stdout") + if len(args.OutputPkg) == 0 { + return fmt.Errorf("--output-pkg must be specified") } - if len(genericArgs.OutputFileBaseName) == 0 { - return fmt.Errorf("output file base name cannot be empty") + if len(args.OutputFile) == 0 { + return fmt.Errorf("--output-file must be specified") } - if len(genericArgs.OutputPackagePath) == 0 { - return fmt.Errorf("output package cannot be empty") + if len(args.ReportFilename) == 0 { + return fmt.Errorf("--report-filename must be specified (use \"-\" for stdout)") } return nil } diff --git a/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go similarity index 78% rename from vendor/k8s.io/code-generator/cmd/openapi-gen/main.go rename to vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go index c446e80b..b466019a 100644 --- a/vendor/k8s.io/code-generator/cmd/openapi-gen/main.go +++ b/vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go @@ -24,33 +24,38 @@ import ( "flag" "log" - generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" - "k8s.io/kube-openapi/pkg/generators" - "github.com/spf13/pflag" - + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" "k8s.io/klog/v2" + "k8s.io/kube-openapi/cmd/openapi-gen/args" + "k8s.io/kube-openapi/pkg/generators" ) func main() { klog.InitFlags(nil) - genericArgs, customArgs := generatorargs.NewDefaults() + args := args.New() - genericArgs.AddFlags(pflag.CommandLine) - customArgs.AddFlags(pflag.CommandLine) + args.AddFlags(pflag.CommandLine) flag.Set("logtostderr", "true") pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - if err := generatorargs.Validate(genericArgs); err != nil { + if err := args.Validate(); err != nil { log.Fatalf("Arguments validation error: %v", err) } + myTargets := func(context *generator.Context) []generator.Target { + return generators.GetTargets(context, args) + } + // Generates the code for the OpenAPIDefinitions. - if err := genericArgs.Execute( + if err := gengo.Execute( generators.NameSystems(), generators.DefaultNameSystem(), - generators.Packages, + myTargets, + gengo.StdBuildTag, + pflag.Args(), ); err != nil { log.Fatalf("OpenAPI code generation error: %v", err) } diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go index 98be932c..1c4cb5bf 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go @@ -152,7 +152,7 @@ func (o *openAPI) finalizeSwagger() (*spec.Swagger, error) { } } - return o.swagger, nil + return deduplicateParameters(o.swagger) } func (o *openAPI) buildDefinitionRecursively(name string) error { diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/parameters.go b/vendor/k8s.io/kube-openapi/pkg/builder/parameters.go new file mode 100644 index 00000000..2bb8bd88 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/builder/parameters.go @@ -0,0 +1,259 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
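The net effect of the openapi-gen argument rework above is a plain Args struct with explicit --output-dir/--output-pkg/--output-file/--go-header-file flags instead of gengo's GeneratorArgs. A hedged sketch of driving that flag surface programmatically; the directory and package names are invented for illustration:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"k8s.io/kube-openapi/cmd/openapi-gen/args"
)

func main() {
	a := args.New()
	fs := pflag.NewFlagSet("openapi-gen", pflag.ExitOnError)
	a.AddFlags(fs)

	// Roughly equivalent to:
	//   openapi-gen --output-dir ./pkg/generated/openapi \
	//               --output-pkg example.com/project/pkg/generated/openapi \
	//               -r /dev/null ./pkg/apis/...
	// (paths and package names here are made up).
	_ = fs.Parse([]string{
		"--output-dir", "./pkg/generated/openapi",
		"--output-pkg", "example.com/project/pkg/generated/openapi",
		"-r", "/dev/null",
	})

	if err := a.Validate(); err != nil {
		fmt.Println("invalid arguments:", err)
		return
	}
	fmt.Println("would generate", a.OutputFile, "into", a.OutputDir)
}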
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "hash/fnv" + "sort" + "strconv" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// deduplicateParameters finds parameters that are shared across multiple endpoints and replace them with +// references to the shared parameters in order to avoid repetition. +// +// deduplicateParameters does not mutate the source. +func deduplicateParameters(sp *spec.Swagger) (*spec.Swagger, error) { + names, parameters, err := collectSharedParameters(sp) + if err != nil { + return nil, err + } + + if sp.Parameters != nil { + return nil, fmt.Errorf("shared parameters already exist") // should not happen with the builder, but to be sure + } + + clone := *sp + clone.Parameters = parameters + return replaceSharedParameters(names, &clone) +} + +// collectSharedParameters finds parameters that show up for many endpoints. These +// are basically all parameters with the exceptions of those where we know they are +// endpoint specific, e.g. because they reference the schema of the kind, or have +// the kind or resource name in the description. +func collectSharedParameters(sp *spec.Swagger) (namesByJSON map[string]string, ret map[string]spec.Parameter, err error) { + if sp == nil || sp.Paths == nil { + return nil, nil, nil + } + + countsByJSON := map[string]int{} + shared := map[string]spec.Parameter{} + var keys []string + + collect := func(p *spec.Parameter) error { + if (p.In == "query" || p.In == "path") && p.Name == "name" { + return nil // ignore name parameter as they are never shared with the Kind in the description + } + if p.In == "query" && p.Name == "fieldValidation" { + return nil // keep fieldValidation parameter unshared because kubectl uses it (until 1.27) to detect server-side field validation support + } + if p.In == "query" && p.Name == "dryRun" { + return nil // keep fieldValidation parameter unshared because kubectl uses it (until 1.26) to detect dry-run support + } + if p.Schema != nil && p.In == "body" && p.Name == "body" && !strings.HasPrefix(p.Schema.Ref.String(), "#/definitions/io.k8s.apimachinery") { + return nil // ignore non-generic body parameters as they reference the custom schema of the kind + } + + bs, err := json.Marshal(p) + if err != nil { + return err + } + + k := string(bs) + countsByJSON[k]++ + if count := countsByJSON[k]; count == 1 { + shared[k] = *p + keys = append(keys, k) + } + + return nil + } + + for _, path := range sp.Paths.Paths { + // per operation parameters + for _, op := range operations(&path) { + if op == nil { + continue // shouldn't happen, but ignore if it does; tested through unit test + } + for _, p := range op.Parameters { + if p.Ref.String() != "" { + // shouldn't happen, but ignore if it does + continue + } + if err := collect(&p); err != nil { + return nil, nil, err + } + } + } + + // per path parameters + for _, p := range path.Parameters { + if p.Ref.String() != "" { + continue // shouldn't happen, but ignore if it does + } + if err := collect(&p); err != nil { + return nil, nil, err + } + } + } + + // name deterministically + sort.Strings(keys) + ret = map[string]spec.Parameter{} + namesByJSON = map[string]string{} + for _, k := range keys { + name := shared[k].Name + if name == "" { + // this should never happen as the name is a required field. But if it does, let's be safe. 
+ name = "param" + } + name += "-" + base64Hash(k) + i := 0 + for { + if _, ok := ret[name]; !ok { + ret[name] = shared[k] + namesByJSON[k] = name + break + } + i++ // only on hash conflict, unlikely with our few variants + name = shared[k].Name + "-" + strconv.Itoa(i) + } + } + + return namesByJSON, ret, nil +} + +func operations(path *spec.PathItem) []*spec.Operation { + return []*spec.Operation{path.Get, path.Put, path.Post, path.Delete, path.Options, path.Head, path.Patch} +} + +func base64Hash(s string) string { + hash := fnv.New64() + hash.Write([]byte(s)) //nolint:errcheck + return base64.URLEncoding.EncodeToString(hash.Sum(make([]byte, 0, 8))[:6]) // 8 characters +} + +func replaceSharedParameters(sharedParameterNamesByJSON map[string]string, sp *spec.Swagger) (*spec.Swagger, error) { + if sp == nil || sp.Paths == nil { + return sp, nil + } + + ret := sp + + firstPathChange := true + for k, path := range sp.Paths.Paths { + pathChanged := false + + // per operation parameters + for _, op := range []**spec.Operation{&path.Get, &path.Put, &path.Post, &path.Delete, &path.Options, &path.Head, &path.Patch} { + if *op == nil { + continue + } + + firstParamChange := true + for i := range (*op).Parameters { + p := (*op).Parameters[i] + + if p.Ref.String() != "" { + // shouldn't happen, but be idem-potent if it does + continue + } + + bs, err := json.Marshal(p) + if err != nil { + return nil, err + } + + if name, ok := sharedParameterNamesByJSON[string(bs)]; ok { + if firstParamChange { + orig := *op + *op = &spec.Operation{} + **op = *orig + (*op).Parameters = make([]spec.Parameter, len(orig.Parameters)) + copy((*op).Parameters, orig.Parameters) + firstParamChange = false + } + + (*op).Parameters[i] = spec.Parameter{ + Refable: spec.Refable{ + Ref: spec.MustCreateRef("#/parameters/" + name), + }, + } + pathChanged = true + } + } + } + + // per path parameters + firstParamChange := true + for i := range path.Parameters { + p := path.Parameters[i] + + if p.Ref.String() != "" { + // shouldn't happen, but be idem-potent if it does + continue + } + + bs, err := json.Marshal(p) + if err != nil { + return nil, err + } + + if name, ok := sharedParameterNamesByJSON[string(bs)]; ok { + if firstParamChange { + orig := path.Parameters + path.Parameters = make([]spec.Parameter, len(orig)) + copy(path.Parameters, orig) + firstParamChange = false + } + + path.Parameters[i] = spec.Parameter{ + Refable: spec.Refable{ + Ref: spec.MustCreateRef("#/parameters/" + name), + }, + } + pathChanged = true + } + } + + if pathChanged { + if firstPathChange { + clone := *sp + ret = &clone + + pathsClone := *ret.Paths + ret.Paths = &pathsClone + + ret.Paths.Paths = make(map[string]spec.PathItem, len(sp.Paths.Paths)) + for k, v := range sp.Paths.Paths { + ret.Paths.Paths[k] = v + } + + firstPathChange = false + } + ret.Paths.Paths[k] = path + } + } + + return ret, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go index a0c00f07..081dae30 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go @@ -156,7 +156,9 @@ func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []str } r := &spec3.RequestBody{ RequestBodyProps: spec3.RequestBodyProps{ - Content: map[string]*spec3.MediaType{}, + Content: map[string]*spec3.MediaType{}, + Description: param.Description(), + Required: param.Required(), }, } for _, consume := range consumes { @@ -172,9 +174,9 @@ func (o *openAPI) 
buildRequestBody(parameters []common.Parameter, consumes []str return nil, nil } -func newOpenAPI(config *common.Config) openAPI { +func newOpenAPI(config *common.OpenAPIV3Config) openAPI { o := openAPI{ - config: common.ConvertConfigToV3(config), + config: config, spec: &spec3.OpenAPI{ Version: "3.0.0", Info: config.Info, @@ -313,20 +315,38 @@ func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error { // BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it. // // Deprecated: BuildOpenAPISpecFromRoutes should be used instead. -func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) { +func BuildOpenAPISpec(webServices []*restful.WebService, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) { return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config) } // BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it. -func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) { +func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) { a := newOpenAPI(config) err := a.buildOpenAPISpec(webServices) if err != nil { return nil, err } + if config.PostProcessSpec != nil { + return config.PostProcessSpec(a.spec) + } return a.spec, nil } +// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it. +// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the +// passed type names. +func BuildOpenAPIDefinitionsForResources(config *common.OpenAPIV3Config, names ...string) (map[string]*spec.Schema, error) { + o := newOpenAPI(config) + // We can discard the return value of toSchema because all we care about is the side effect of calling it. + // All the models created for this resource get added to o.swagger.Definitions + for _, name := range names { + _, err := o.toSchema(name) + if err != nil { + return nil, err + } + } + return o.spec.Components.Schemas, nil +} func (o *openAPI) findCommonParameters(routes []common.Route) (map[interface{}]*spec3.Parameter, error) { commonParamsMap := make(map[interface{}]*spec3.Parameter, 0) paramOpsCountByName := make(map[interface{}]int, 0) diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go new file mode 100644 index 00000000..a66fe8a0 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -0,0 +1,290 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cached provides a cache mechanism based on etags to lazily +// build, and/or cache results from expensive operation such that those +// operations are not repeated unnecessarily. The operations can be +// created as a tree, and replaced dynamically as needed. 
+// +// All the operations in this module are thread-safe. +// +// # Dependencies and types of caches +// +// This package uses a source/transform/sink model of caches to build +// the dependency tree, and can be used as follows: +// - [Func]: A source cache that recomputes the content every time. +// - [Once]: A source cache that always produces the +// same content, it is only called once. +// - [Transform]: A cache that transforms data from one format to +// another. It's only refreshed when the source changes. +// - [Merge]: A cache that aggregates multiple caches in a map into one. +// It's only refreshed when the source changes. +// - [MergeList]: A cache that aggregates multiple caches in a list into one. +// It's only refreshed when the source changes. +// - [Atomic]: A cache adapter that atomically replaces the source with a new one. +// - [LastSuccess]: A cache adapter that caches the last successful and returns +// it if the next call fails. It extends [Atomic]. +// +// # Etags +// +// Etags in this library is a cache version identifier. It doesn't +// necessarily strictly match to the semantics of http `etags`, but are +// somewhat inspired from it and function with the same principles. +// Hashing the content is a good way to guarantee that your function is +// never going to be called spuriously. In Kubernetes world, this could +// be a `resourceVersion`, this can be an actual etag, a hash, a UUID +// (if the cache always changes), or even a made-up string when the +// content of the cache never changes. +package cached + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// Value is wrapping a value behind a getter for lazy evaluation. +type Value[T any] interface { + Get() (value T, etag string, err error) +} + +// Result is wrapping T and error into a struct for cases where a tuple is more +// convenient or necessary in Golang. +type Result[T any] struct { + Value T + Etag string + Err error +} + +func (r Result[T]) Get() (T, string, error) { + return r.Value, r.Etag, r.Err +} + +// Func wraps a (thread-safe) function as a Value[T]. +func Func[T any](fn func() (T, string, error)) Value[T] { + return valueFunc[T](fn) +} + +type valueFunc[T any] func() (T, string, error) + +func (c valueFunc[T]) Get() (T, string, error) { + return c() +} + +// Static returns constant values. +func Static[T any](value T, etag string) Value[T] { + return Result[T]{Value: value, Etag: etag} +} + +// Merge merges a of cached values. The merge function only gets called if any of +// the dependency has changed. +// +// If any of the dependency returned an error before, or any of the +// dependency returned an error this time, or if the mergeFn failed +// before, then the function is run again. +// +// Note that this assumes there is no "partial" merge, the merge +// function will remerge all the dependencies together everytime. Since +// the list of dependencies is constant, there is no way to save some +// partial merge information either. +// +// Also note that Golang map iteration is not stable. If the mergeFn +// depends on the order iteration to be stable, it will need to +// implement its own sorting or iteration order. 
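Because of the iteration-order caveat above, a `Merge` callback that needs ordering should sort the keys itself. A small usage sketch against the API introduced in this file (values and etags are made up):

```go
package main

import (
	"fmt"
	"sort"
	"strings"

	"k8s.io/kube-openapi/pkg/cached"
)

func main() {
	sources := map[string]cached.Value[string]{
		"beta":  cached.Static("world", "etag-b"),
		"alpha": cached.Static("hello", "etag-a"),
	}

	merged := cached.Merge(func(results map[string]cached.Result[string]) (string, string, error) {
		// Sort keys for a deterministic merge; map iteration order is random.
		keys := make([]string, 0, len(results))
		for k := range results {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		var parts, etags []string
		for _, k := range keys {
			r := results[k]
			if r.Err != nil {
				return "", "", r.Err
			}
			parts = append(parts, r.Value)
			etags = append(etags, r.Etag)
		}
		return strings.Join(parts, " "), strings.Join(etags, ","), nil
	}, sources)

	v, etag, err := merged.Get()
	fmt.Println(v, etag, err) // hello world etag-a,etag-b <nil>
}
```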
+func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, string, error), caches map[K]Value[T]) Value[V] { + list := make([]Value[T], 0, len(caches)) + + // map from index to key + indexes := make(map[int]K, len(caches)) + i := 0 + for k := range caches { + list = append(list, caches[k]) + indexes[i] = k + i++ + } + + return MergeList(func(results []Result[T]) (V, string, error) { + if len(results) != len(indexes) { + panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes))) + } + m := make(map[K]Result[T], len(results)) + for i := range results { + m[indexes[i]] = results[i] + } + return mergeFn(m) + }, list) +} + +// MergeList merges a list of cached values. The function only gets called if +// any of the dependency has changed. +// +// The benefit of ListMerger over the basic Merger is that caches are +// stored in an ordered list so the order of the cache will be +// preserved in the order of the results passed to the mergeFn. +// +// If any of the dependency returned an error before, or any of the +// dependency returned an error this time, or if the mergeFn failed +// before, then the function is reran. +// +// Note that this assumes there is no "partial" merge, the merge +// function will remerge all the dependencies together everytime. Since +// the list of dependencies is constant, there is no way to save some +// partial merge information either. +func MergeList[T, V any](mergeFn func(results []Result[T]) (V, string, error), delegates []Value[T]) Value[V] { + return &listMerger[T, V]{ + mergeFn: mergeFn, + delegates: delegates, + } +} + +type listMerger[T, V any] struct { + lock sync.Mutex + mergeFn func([]Result[T]) (V, string, error) + delegates []Value[T] + cache []Result[T] + result Result[V] +} + +func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { + cacheResults := make([]Result[T], len(c.delegates)) + ch := make(chan struct { + int + Result[T] + }, len(c.delegates)) + for i := range c.delegates { + go func(index int) { + value, etag, err := c.delegates[index].Get() + ch <- struct { + int + Result[T] + }{index, Result[T]{Value: value, Etag: etag, Err: err}} + }(i) + } + for i := 0; i < len(c.delegates); i++ { + res := <-ch + cacheResults[res.int] = res.Result + } + return cacheResults +} + +func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { + if c.cache == nil { + return true + } + if c.result.Err != nil { + return true + } + if len(results) != len(c.cache) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cache))) + } + for i, oldResult := range c.cache { + newResult := results[i] + if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { + return true + } + } + return false +} + +func (c *listMerger[T, V]) Get() (V, string, error) { + c.lock.Lock() + defer c.lock.Unlock() + cacheResults := c.prepareResultsLocked() + if c.needsRunningLocked(cacheResults) { + c.cache = cacheResults + c.result.Value, c.result.Etag, c.result.Err = c.mergeFn(c.cache) + } + return c.result.Value, c.result.Etag, c.result.Err +} + +// Transform the result of another cached value. The transformFn will only be called +// if the source has updated, otherwise, the result will be returned. +// +// If the dependency returned an error before, or it returns an error +// this time, or if the transformerFn failed before, the function is +// reran. 
+func Transform[T, V any](transformerFn func(T, string, error) (V, string, error), source Value[T]) Value[V] { + return MergeList(func(delegates []Result[T]) (V, string, error) { + if len(delegates) != 1 { + panic(fmt.Errorf("invalid cache for transformer cache: %v", delegates)) + } + return transformerFn(delegates[0].Value, delegates[0].Etag, delegates[0].Err) + }, []Value[T]{source}) +} + +// Once calls Value[T].Get() lazily and only once, even in case of an error result. +func Once[T any](d Value[T]) Value[T] { + return &once[T]{ + data: d, + } +} + +type once[T any] struct { + once sync.Once + data Value[T] + result Result[T] +} + +func (c *once[T]) Get() (T, string, error) { + c.once.Do(func() { + c.result.Value, c.result.Etag, c.result.Err = c.data.Get() + }) + return c.result.Value, c.result.Etag, c.result.Err +} + +// Replaceable extends the Value[T] interface with the ability to change the +// underlying Value[T] after construction. +type Replaceable[T any] interface { + Value[T] + Store(Value[T]) +} + +// Atomic wraps a Value[T] as an atomic value that can be replaced. It implements +// Replaceable[T]. +type Atomic[T any] struct { + value atomic.Pointer[Value[T]] +} + +var _ Replaceable[[]byte] = &Atomic[[]byte]{} + +func (x *Atomic[T]) Store(val Value[T]) { x.value.Store(&val) } +func (x *Atomic[T]) Get() (T, string, error) { return (*x.value.Load()).Get() } + +// LastSuccess calls Value[T].Get(), but hides errors by returning the last +// success if there has been any. +type LastSuccess[T any] struct { + Atomic[T] + success atomic.Pointer[Result[T]] +} + +var _ Replaceable[[]byte] = &LastSuccess[[]byte]{} + +func (c *LastSuccess[T]) Get() (T, string, error) { + success := c.success.Load() + value, etag, err := c.Atomic.Get() + if err == nil { + if success == nil { + c.success.CompareAndSwap(nil, &Result[T]{Value: value, Etag: etag, Err: err}) + } + return value, etag, err + } + + if success != nil { + return success.Value, success.Etag, success.Err + } + + return value, etag, err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 24f2b0e8..e4ce843b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -22,7 +22,6 @@ import ( "github.com/emicklei/go-restful/v3" - "k8s.io/kube-openapi/pkg/openapiconv" "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -165,6 +164,9 @@ type OpenAPIV3Config struct { // It is an optional function to customize model names. GetDefinitionName func(name string) (string, spec.Extensions) + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error) + // SecuritySchemes is list of all security schemes for OpenAPI service. 
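The `PostProcessSpec` hook added just above runs after the v3 spec is fully built, as the last step of `BuildOpenAPISpecFromRoutes`. A hedged wiring sketch, assuming the usual `Info` field on `OpenAPIV3Config` (titles and versions are made up):

```go
package main

import (
	"k8s.io/kube-openapi/pkg/common"
	"k8s.io/kube-openapi/pkg/spec3"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

// newConfig returns an OpenAPIV3Config whose PostProcessSpec makes one final
// tweak to the generated document before it is returned to the caller.
func newConfig() *common.OpenAPIV3Config {
	return &common.OpenAPIV3Config{
		Info: &spec.Info{InfoProps: spec.InfoProps{Title: "example", Version: "v1"}},
		PostProcessSpec: func(s *spec3.OpenAPI) (*spec3.OpenAPI, error) {
			s.Info.Title = "example (post-processed)"
			return s, nil
		},
	}
}

func main() { _ = newConfig() }
```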
SecuritySchemes spec3.SecuritySchemes @@ -172,43 +174,6 @@ type OpenAPIV3Config struct { DefaultSecurity []map[string][]string } -// ConvertConfigToV3 converts a Config object to an OpenAPIV3Config object -func ConvertConfigToV3(config *Config) *OpenAPIV3Config { - if config == nil { - return nil - } - - v3Config := &OpenAPIV3Config{ - Info: config.Info, - IgnorePrefixes: config.IgnorePrefixes, - GetDefinitions: config.GetDefinitions, - GetOperationIDAndTags: config.GetOperationIDAndTags, - GetOperationIDAndTagsFromRoute: config.GetOperationIDAndTagsFromRoute, - GetDefinitionName: config.GetDefinitionName, - Definitions: config.Definitions, - SecuritySchemes: make(spec3.SecuritySchemes), - DefaultSecurity: config.DefaultSecurity, - DefaultResponse: openapiconv.ConvertResponse(config.DefaultResponse, []string{"application/json"}), - - CommonResponses: make(map[int]*spec3.Response), - ResponseDefinitions: make(map[string]*spec3.Response), - } - - if config.SecurityDefinitions != nil { - for s, securityScheme := range *config.SecurityDefinitions { - v3Config.SecuritySchemes[s] = openapiconv.ConvertSecurityScheme(securityScheme) - } - } - for k, commonResponse := range config.CommonResponses { - v3Config.CommonResponses[k] = openapiconv.ConvertResponse(&commonResponse, []string{"application/json"}) - } - - for k, responseDefinition := range config.ResponseDefinitions { - v3Config.ResponseDefinitions[k] = openapiconv.ConvertResponse(&responseDefinition, []string{"application/json"}) - } - return v3Config -} - type typeInfo struct { name string format string @@ -246,38 +211,42 @@ var schemaTypeFormatMap = map[string]typeInfo{ // the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple // type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. // Example: -// type Sample struct { -// ... -// // port of the server -// port IntOrString -// ... -// } +// +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // // IntOrString documentation... // type IntOrString { ... } // // Adding IntOrString to this function: -// "port" : { -// format: "string", -// type: "int-or-string", -// Description: "port of the server" -// } +// +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } // // Implement OpenAPIDefinitionGetter for IntOrString: // -// "port" : { -// $Ref: "#/definitions/IntOrString" -// Description: "port of the server" -// } +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// // ... // definitions: -// { -// "IntOrString": { -// format: "string", -// type: "int-or-string", -// Description: "IntOrString documentation..." // new -// } -// } // +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." 
// new +// } +// } func OpenAPITypeFormat(typeName string) (string, string) { mapped, ok := schemaTypeFormatMap[typeName] if !ok { diff --git a/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go b/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go index 6755f876..932b84a0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/restfuladapter/adapter.go @@ -13,4 +13,3 @@ func AdaptWebServices(webServices []*restful.WebService) []common.RouteContainer } return containers } - diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go index f8e64e44..5deff4d5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go @@ -20,14 +20,13 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "sort" "k8s.io/kube-openapi/pkg/generators/rules" - "k8s.io/gengo/generator" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" ) @@ -64,7 +63,7 @@ func (a apiViolationFile) VerifyFile(f *generator.File, path string) error { path = a.unmangledPath formatted := f.Body.Bytes() - existing, err := ioutil.ReadFile(path) + existing, err := os.ReadFile(path) if err != nil { return fmt.Errorf("unable to read file %q for comparison: %v", path, err) } @@ -95,7 +94,7 @@ func newAPIViolationGen() *apiViolationGen { } type apiViolationGen struct { - generator.DefaultGen + generator.GoGenerator linter *apiLinter } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/config.go b/vendor/k8s.io/kube-openapi/pkg/generators/config.go index d728f2a3..1fbd7759 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/config.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/config.go @@ -17,16 +17,14 @@ limitations under the License. package generators import ( - "fmt" - "path/filepath" + "path" - "k8s.io/gengo/args" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" "k8s.io/klog/v2" - - generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args" + "k8s.io/kube-openapi/cmd/openapi-gen/args" ) type identityNamer struct{} @@ -51,36 +49,31 @@ func DefaultNameSystem() string { return "sorting_namer" } -func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { - boilerplate, err := arguments.LoadGoBoilerplate() +func GetTargets(context *generator.Context, args *args.Args) []generator.Target { + boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) if err != nil { klog.Fatalf("Failed loading boilerplate: %v", err) } - header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...) - header = append(header, []byte( - ` -// This file was autogenerated by openapi-gen. Do not edit it manually! - -`)...) 
reportPath := "-" - if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok { - reportPath = customArgs.ReportFilename + if args.ReportFilename != "" { + reportPath = args.ReportFilename } context.FileTypes[apiViolationFileType] = apiViolationFile{ unmangledPath: reportPath, } - return generator.Packages{ - &generator.DefaultPackage{ - PackageName: filepath.Base(arguments.OutputPackagePath), - PackagePath: arguments.OutputPackagePath, - HeaderText: header, - GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { + return []generator.Target{ + &generator.SimpleTarget{ + PkgName: path.Base(args.OutputPkg), // `path` vs. `filepath` because packages use '/' + PkgPath: args.OutputPkg, + PkgDir: args.OutputDir, + HeaderComment: boilerplate, + GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { return []generator.Generator{ newOpenAPIGen( - arguments.OutputFileBaseName, - arguments.OutputPackagePath, + args.OutputFile, + args.OutputPkg, ), newAPIViolationGen(), } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/enum.go b/vendor/k8s.io/kube-openapi/pkg/generators/enum.go index cf528048..3db034d6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/enum.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/enum.go @@ -22,8 +22,9 @@ import ( "sort" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/types" ) const tagEnumType = "enum" @@ -78,9 +79,12 @@ func (et *enumType) ValueStrings() []string { // DescriptionLines returns a description of the enum in this format: // // Possible enum values: -// - `"value1"` description 1 -// - `"value2"` description 2 +// - `"value1"` description 1 +// - `"value2"` description 2 func (et *enumType) DescriptionLines() []string { + if len(et.Values) == 0 { + return nil + } var lines []string for _, value := range et.Values { lines = append(lines, value.Description()) @@ -118,7 +122,7 @@ func parseEnums(c *generator.Context) enumMap { Value: *c.ConstValue, Comment: strings.Join(c.CommentLines, " "), } - enumTypes[enumType.Name].appendValue(value) + enumTypes[enumType.Name].addIfNotPresent(value) } } } @@ -126,13 +130,27 @@ func parseEnums(c *generator.Context) enumMap { return enumTypes } -func (et *enumType) appendValue(value *enumValue) { +func (et *enumType) addIfNotPresent(value *enumValue) { + // If we already have an enum case with the same value, then ignore this new + // one. This can happen if an enum aliases one from another package and + // re-exports the cases. + for i, existing := range et.Values { + if existing.Value == value.Value { + + // Take the value of the longer comment (or some other deterministic tie breaker) + if len(existing.Comment) < len(value.Comment) || (len(existing.Comment) == len(value.Comment) && existing.Comment > value.Comment) { + et.Values[i] = value + } + + return + } + } et.Values = append(et.Values, value) } // Description returns the description line for the enumValue // with the format: -// - `"FooValue"` is the Foo value +// - `"FooValue"` is the Foo value func (ev *enumValue) Description() string { comment := strings.TrimSpace(ev.Comment) // The comment should starts with the type name, trim it first. 
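For reference, the input this enum handling consumes is a string type tagged `+enum`, with the constant comments becoming the "Possible enum values" description lines. A minimal, hypothetical example (type and constant names are illustrative, not taken from this patch):

```go
package example

// Protocol selects the transport protocol.
// +enum
type Protocol string

const (
	// ProtocolTCP means the connection uses TCP.
	ProtocolTCP Protocol = "TCP"

	// ProtocolUDP means the connection uses UDP.
	ProtocolUDP Protocol = "UDP"
)
```

With `addIfNotPresent` above, a constant that aliases an already-seen value (for example a case re-exported from another package) no longer produces a duplicate bullet; the variant with the longer comment wins.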
@@ -152,7 +170,7 @@ func isEnumType(stringType *types.Type, t *types.Type) bool { } func hasEnumTag(t *types.Type) bool { - return types.ExtractCommentTags("+", t.CommentLines)[tagEnumType] != nil + return gengo.ExtractCommentTags("+", t.CommentLines)[tagEnumType] != nil } // whitespaceRegex is the regex for consecutive whitespaces. diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go index e37d93ef..42d38541 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/extension.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/extension.go @@ -21,8 +21,9 @@ import ( "sort" "strings" - "k8s.io/gengo/examples/set-gen/sets" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/types" + "k8s.io/kube-openapi/pkg/util/sets" ) const extensionPrefix = "x-kubernetes-" @@ -171,7 +172,7 @@ func parseExtensions(comments []string) ([]extension, []error) { } } // Next, generate extensions from "idlTags" (e.g. +listType) - tagValues := types.ExtractCommentTags("+", comments) + tagValues := gengo.ExtractCommentTags("+", comments) for _, idlTag := range sortedMapKeys(tagValues) { xAttrs, exists := tagToExtension[idlTag] if !exists { diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/markers.go b/vendor/k8s.io/kube-openapi/pkg/generators/markers.go new file mode 100644 index 00000000..7f0fe985 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/markers.go @@ -0,0 +1,613 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package generators + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "k8s.io/gengo/v2/types" + openapi "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +type CELTag struct { + Rule string `json:"rule,omitempty"` + Message string `json:"message,omitempty"` + MessageExpression string `json:"messageExpression,omitempty"` + OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` + Reason string `json:"reason,omitempty"` + FieldPath string `json:"fieldPath,omitempty"` +} + +func (c *CELTag) Validate() error { + if c == nil || *c == (CELTag{}) { + return fmt.Errorf("empty CEL tag is not allowed") + } + + var errs []error + if c.Rule == "" { + errs = append(errs, fmt.Errorf("rule cannot be empty")) + } + if c.Message == "" && c.MessageExpression == "" { + errs = append(errs, fmt.Errorf("message or messageExpression must be set")) + } + if c.Message != "" && c.MessageExpression != "" { + errs = append(errs, fmt.Errorf("message and messageExpression cannot be set at the same time")) + } + + if len(errs) > 0 { + return errors.Join(errs...) + } + + return nil +} + +// commentTags represents the parsed comment tags for a given type. These types are then used to generate schema validations. +// These only include the newer prefixed tags. The older tags are still supported, +// but are not included in this struct. 
Comment Tags are transformed into a +// *spec.Schema, which is then combined with the older marker comments to produce +// the generated OpenAPI spec. +// +// List of tags not included in this struct: +// +// - +optional +// - +default +// - +listType +// - +listMapKeys +// - +mapType +type commentTags struct { + spec.SchemaProps + + CEL []CELTag `json:"cel,omitempty"` + + // Future markers can all be parsed into this centralized struct... + // Optional bool `json:"optional,omitempty"` + // Default any `json:"default,omitempty"` +} + +// Returns the schema for the given CommentTags instance. +// This is the final authoritative schema for the comment tags +func (c commentTags) ValidationSchema() (*spec.Schema, error) { + res := spec.Schema{ + SchemaProps: c.SchemaProps, + } + + if len(c.CEL) > 0 { + // Convert the CELTag to a map[string]interface{} via JSON + celTagJSON, err := json.Marshal(c.CEL) + if err != nil { + return nil, fmt.Errorf("failed to marshal CEL tag: %w", err) + } + var celTagMap []interface{} + if err := json.Unmarshal(celTagJSON, &celTagMap); err != nil { + return nil, fmt.Errorf("failed to unmarshal CEL tag: %w", err) + } + + res.VendorExtensible.AddExtension("x-kubernetes-validations", celTagMap) + } + + return &res, nil +} + +// validates the parameters in a CommentTags instance. Returns any errors encountered. +func (c commentTags) Validate() error { + + var err error + + if c.MinLength != nil && *c.MinLength < 0 { + err = errors.Join(err, fmt.Errorf("minLength cannot be negative")) + } + if c.MaxLength != nil && *c.MaxLength < 0 { + err = errors.Join(err, fmt.Errorf("maxLength cannot be negative")) + } + if c.MinItems != nil && *c.MinItems < 0 { + err = errors.Join(err, fmt.Errorf("minItems cannot be negative")) + } + if c.MaxItems != nil && *c.MaxItems < 0 { + err = errors.Join(err, fmt.Errorf("maxItems cannot be negative")) + } + if c.MinProperties != nil && *c.MinProperties < 0 { + err = errors.Join(err, fmt.Errorf("minProperties cannot be negative")) + } + if c.MaxProperties != nil && *c.MaxProperties < 0 { + err = errors.Join(err, fmt.Errorf("maxProperties cannot be negative")) + } + if c.Minimum != nil && c.Maximum != nil && *c.Minimum > *c.Maximum { + err = errors.Join(err, fmt.Errorf("minimum %f is greater than maximum %f", *c.Minimum, *c.Maximum)) + } + if (c.ExclusiveMinimum || c.ExclusiveMaximum) && c.Minimum != nil && c.Maximum != nil && *c.Minimum == *c.Maximum { + err = errors.Join(err, fmt.Errorf("exclusiveMinimum/Maximum cannot be set when minimum == maximum")) + } + if c.MinLength != nil && c.MaxLength != nil && *c.MinLength > *c.MaxLength { + err = errors.Join(err, fmt.Errorf("minLength %d is greater than maxLength %d", *c.MinLength, *c.MaxLength)) + } + if c.MinItems != nil && c.MaxItems != nil && *c.MinItems > *c.MaxItems { + err = errors.Join(err, fmt.Errorf("minItems %d is greater than maxItems %d", *c.MinItems, *c.MaxItems)) + } + if c.MinProperties != nil && c.MaxProperties != nil && *c.MinProperties > *c.MaxProperties { + err = errors.Join(err, fmt.Errorf("minProperties %d is greater than maxProperties %d", *c.MinProperties, *c.MaxProperties)) + } + if c.Pattern != "" { + _, e := regexp.Compile(c.Pattern) + if e != nil { + err = errors.Join(err, fmt.Errorf("invalid pattern %q: %v", c.Pattern, e)) + } + } + if c.MultipleOf != nil && *c.MultipleOf == 0 { + err = errors.Join(err, fmt.Errorf("multipleOf cannot be 0")) + } + + for i, celTag := range c.CEL { + celError := celTag.Validate() + if celError == nil { + continue + } + err = 
errors.Join(err, fmt.Errorf("invalid CEL tag at index %d: %w", i, celError)) + } + + return err +} + +// Performs type-specific validation for CommentTags porameters. Accepts a Type instance and returns any errors encountered during validation. +func (c commentTags) ValidateType(t *types.Type) error { + var err error + + resolvedType := resolveAliasAndPtrType(t) + typeString, _ := openapi.OpenAPITypeFormat(resolvedType.String()) // will be empty for complicated types + + // Structs and interfaces may dynamically be any type, so we cant validate them + // easily. We may be able to if we check that they don't implement all the + // override functions, but for now we just skip them. + if resolvedType.Kind == types.Interface || resolvedType.Kind == types.Struct { + return nil + } + + isArray := resolvedType.Kind == types.Slice || resolvedType.Kind == types.Array + isMap := resolvedType.Kind == types.Map + isString := typeString == "string" + isInt := typeString == "integer" + isFloat := typeString == "number" + + if c.MaxItems != nil && !isArray { + err = errors.Join(err, fmt.Errorf("maxItems can only be used on array types")) + } + if c.MinItems != nil && !isArray { + err = errors.Join(err, fmt.Errorf("minItems can only be used on array types")) + } + if c.UniqueItems && !isArray { + err = errors.Join(err, fmt.Errorf("uniqueItems can only be used on array types")) + } + if c.MaxProperties != nil && !isMap { + err = errors.Join(err, fmt.Errorf("maxProperties can only be used on map types")) + } + if c.MinProperties != nil && !isMap { + err = errors.Join(err, fmt.Errorf("minProperties can only be used on map types")) + } + if c.MinLength != nil && !isString { + err = errors.Join(err, fmt.Errorf("minLength can only be used on string types")) + } + if c.MaxLength != nil && !isString { + err = errors.Join(err, fmt.Errorf("maxLength can only be used on string types")) + } + if c.Pattern != "" && !isString { + err = errors.Join(err, fmt.Errorf("pattern can only be used on string types")) + } + if c.Minimum != nil && !isInt && !isFloat { + err = errors.Join(err, fmt.Errorf("minimum can only be used on numeric types")) + } + if c.Maximum != nil && !isInt && !isFloat { + err = errors.Join(err, fmt.Errorf("maximum can only be used on numeric types")) + } + if c.MultipleOf != nil && !isInt && !isFloat { + err = errors.Join(err, fmt.Errorf("multipleOf can only be used on numeric types")) + } + if c.ExclusiveMinimum && !isInt && !isFloat { + err = errors.Join(err, fmt.Errorf("exclusiveMinimum can only be used on numeric types")) + } + if c.ExclusiveMaximum && !isInt && !isFloat { + err = errors.Join(err, fmt.Errorf("exclusiveMaximum can only be used on numeric types")) + } + + return err +} + +// Parses the given comments into a CommentTags type. Validates the parsed comment tags, and returns the result. +// Accepts an optional type to validate against, and a prefix to filter out markers not related to validation. +// Accepts a prefix to filter out markers not related to validation. +// Returns any errors encountered while parsing or validating the comment tags. 
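At a use site, these prefixed markers attach to fields (or types); their keys mirror the JSON names of `spec.SchemaProps` fields, and their values are parsed as JSON, so string values must be quoted. A hypothetical sketch (type and field names are not from this patch):

```go
package example

// NodePool is a hypothetical API type carrying +k8s:validation: markers.
type NodePool struct {
	// +k8s:validation:maxLength=63
	// +k8s:validation:pattern="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
	Name string `json:"name"`

	// +k8s:validation:minimum=1
	// +k8s:validation:maximum=1000
	Replicas int32 `json:"replicas"`

	// +k8s:validation:maxItems=10
	Zones []string `json:"zones,omitempty"`
}
```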
+func ParseCommentTags(t *types.Type, comments []string, prefix string) (*spec.Schema, error) { + + markers, err := parseMarkers(comments, prefix) + if err != nil { + return nil, fmt.Errorf("failed to parse marker comments: %w", err) + } + nested, err := nestMarkers(markers) + if err != nil { + return nil, fmt.Errorf("invalid marker comments: %w", err) + } + + // Parse the map into a CommentTags type by marshalling and unmarshalling + // as JSON in leiu of an unstructured converter. + out, err := json.Marshal(nested) + if err != nil { + return nil, fmt.Errorf("failed to marshal marker comments: %w", err) + } + + var commentTags commentTags + if err = json.Unmarshal(out, &commentTags); err != nil { + return nil, fmt.Errorf("failed to unmarshal marker comments: %w", err) + } + + // Validate the parsed comment tags + validationErrors := commentTags.Validate() + + if t != nil { + validationErrors = errors.Join(validationErrors, commentTags.ValidateType(t)) + } + + if validationErrors != nil { + return nil, fmt.Errorf("invalid marker comments: %w", validationErrors) + } + + return commentTags.ValidationSchema() +} + +var ( + allowedKeyCharacterSet = `[:_a-zA-Z0-9\[\]\-]` + valueEmpty = regexp.MustCompile(fmt.Sprintf(`^(%s*)$`, allowedKeyCharacterSet)) + valueAssign = regexp.MustCompile(fmt.Sprintf(`^(%s*)=(.*)$`, allowedKeyCharacterSet)) + valueRawString = regexp.MustCompile(fmt.Sprintf(`^(%s*)>(.*)$`, allowedKeyCharacterSet)) +) + +// extractCommentTags parses comments for lines of the form: +// +// 'marker' + "key=value" +// +// or to specify truthy boolean keys: +// +// 'marker' + "key" +// +// Values are optional; "" is the default. A tag can be specified more than +// one time and all values are returned. Returns a map with an entry for +// for each key and a value. +// +// Similar to version from gengo, but this version support only allows one +// value per key (preferring explicit array indices), supports raw strings +// with concatenation, and limits the usable characters allowed in a key +// (for simpler parsing). +// +// Assignments and empty values have the same syntax as from gengo. Raw strings +// have the syntax: +// +// 'marker' + "key>value" +// 'marker' + "key>value" +// +// Successive usages of the same raw string key results in concatenating each +// line with `\n` in between. It is an error to use `=` to assing to a previously +// assigned key +// (in contrast to types.ExtractCommentTags which allows array-typed +// values to be specified using `=`). +func extractCommentTags(marker string, lines []string) (map[string]string, error) { + out := map[string]string{} + + // Used to track the the line immediately prior to the one being iterated. + // If there was an invalid or ignored line, these values get reset. + lastKey := "" + lastIndex := -1 + lastArrayKey := "" + + var lintErrors []error + + for _, line := range lines { + line = strings.Trim(line, " ") + + // Track the current value of the last vars to use in this loop iteration + // before they are reset for the next iteration. 
+ previousKey := lastKey + previousArrayKey := lastArrayKey + previousIndex := lastIndex + + // Make sure last vars gets reset if we `continue` + lastIndex = -1 + lastArrayKey = "" + lastKey = "" + + if len(line) == 0 { + continue + } else if !strings.HasPrefix(line, marker) { + continue + } + + line = strings.TrimPrefix(line, marker) + + key := "" + value := "" + + if matches := valueAssign.FindStringSubmatch(line); matches != nil { + key = matches[1] + value = matches[2] + + // If key exists, throw error. + // Some of the old kube open-api gen marker comments like + // `+listMapKeys` allowed a list to be specified by writing key=value + // multiple times. + // + // This is not longer supported for the prefixed marker comments. + // This is to prevent confusion with the new array syntax which + // supports lists of objects. + // + // The old marker comments like +listMapKeys will remain functional, + // but new markers will not support it. + if _, ok := out[key]; ok { + return nil, fmt.Errorf("cannot have multiple values for key '%v'", key) + } + + } else if matches := valueEmpty.FindStringSubmatch(line); matches != nil { + key = matches[1] + value = "" + + } else if matches := valueRawString.FindStringSubmatch(line); matches != nil { + toAdd := strings.Trim(string(matches[2]), " ") + + key = matches[1] + + // First usage as a raw string. + if existing, exists := out[key]; !exists { + + // Encode the raw string as JSON to ensure that it is properly escaped. + valueBytes, err := json.Marshal(toAdd) + if err != nil { + return nil, fmt.Errorf("invalid value for key %v: %w", key, err) + } + + value = string(valueBytes) + } else if key != previousKey { + // Successive usages of the same key of a raw string must be + // consecutive + return nil, fmt.Errorf("concatenations to key '%s' must be consecutive with its assignment", key) + } else { + // If it is a consecutive repeat usage, concatenate to the + // existing value. + // + // Decode JSON string, append to it, re-encode JSON string. + // Kinda janky but this is a code-generator... + var unmarshalled string + if err := json.Unmarshal([]byte(existing), &unmarshalled); err != nil { + return nil, fmt.Errorf("invalid value for key %v: %w", key, err) + } else { + unmarshalled += "\n" + toAdd + valueBytes, err := json.Marshal(unmarshalled) + if err != nil { + return nil, fmt.Errorf("invalid value for key %v: %w", key, err) + } + + value = string(valueBytes) + } + } + } else { + // Comment has the correct prefix, but incorrect syntax, so it is an + // error + return nil, fmt.Errorf("invalid marker comment does not match expected `+key=` pattern: %v", line) + } + + out[key] = value + lastKey = key + + // Lint the array subscript for common mistakes. 
This only lints the last + // array index used, (since we do not have a need for nested arrays yet + // in markers) + if arrayPath, index, hasSubscript, err := extractArraySubscript(key); hasSubscript { + // If index is non-zero, check that that previous line was for the same + // key and either the same or previous index + if err != nil { + lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: expected integer index in key '%v'", line, key)) + } else if previousArrayKey != arrayPath && index != 0 { + lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: non-consecutive index %v for key '%v'", line, index, arrayPath)) + } else if index != previousIndex+1 && index != previousIndex { + lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: non-consecutive index %v for key '%v'", line, index, arrayPath)) + } + + lastIndex = index + lastArrayKey = arrayPath + } + } + + if len(lintErrors) > 0 { + return nil, errors.Join(lintErrors...) + } + + return out, nil +} + +// Extracts and parses the given marker comments into a map of key -> value. +// Accepts a prefix to filter out markers not related to validation. +// The prefix is removed from the key in the returned map. +// Empty keys and invalid values will return errors, refs are currently unsupported and will be skipped. +func parseMarkers(markerComments []string, prefix string) (map[string]any, error) { + markers, err := extractCommentTags(prefix, markerComments) + if err != nil { + return nil, err + } + + // Parse the values as JSON + result := map[string]any{} + for key, value := range markers { + var unmarshalled interface{} + + if len(key) == 0 { + return nil, fmt.Errorf("cannot have empty key for marker comment") + } else if _, ok := parseSymbolReference(value, ""); ok { + // Skip ref markers + continue + } else if len(value) == 0 { + // Empty value means key is implicitly a bool + result[key] = true + } else if err := json.Unmarshal([]byte(value), &unmarshalled); err != nil { + // Not valid JSON, throw error + return nil, fmt.Errorf("failed to parse value for key %v as JSON: %w", key, err) + } else { + // Is is valid JSON, use as a JSON value + result[key] = unmarshalled + } + } + return result, nil +} + +// Converts a map of: +// +// "a:b:c": 1 +// "a:b:d": 2 +// "a:e": 3 +// "f": 4 +// +// Into: +// +// map[string]any{ +// "a": map[string]any{ +// "b": map[string]any{ +// "c": 1, +// "d": 2, +// }, +// "e": 3, +// }, +// "f": 4, +// } +// +// Returns a list of joined errors for any invalid keys. See putNestedValue for more details. +func nestMarkers(markers map[string]any) (map[string]any, error) { + nested := make(map[string]any) + var errs []error + for key, value := range markers { + var err error + keys := strings.Split(key, ":") + + if err = putNestedValue(nested, keys, value); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return nil, errors.Join(errs...) + } + + return nested, nil +} + +// Recursively puts a value into the given keypath, creating intermediate maps +// and slices as needed. If a key is of the form `foo[bar]`, then bar will be +// treated as an index into the array foo. If bar is not a valid integer, putNestedValue returns an error. 
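Putting the raw-string (`key>value`) form and the array subscripts together, a plausible use site for the CEL markers looks like the sketch below: consecutive `>` lines for the same key concatenate with newlines, and `cel[0]`, `cel[1]`, ... nest into the `cel` list that feeds `commentTags.CEL`. The type and rules are hypothetical:

```go
package example

// Budget is a hypothetical type demonstrating CEL validation markers.
type Budget struct {
	// +k8s:validation:cel[0]:rule> self.spent <= self.limit
	// +k8s:validation:cel[0]:message> spent must not exceed limit
	// +k8s:validation:cel[1]:rule> self.limit >= 0
	// +k8s:validation:cel[1]:messageExpression> 'limit must be non-negative'
	Spending Limits `json:"spending"`
}

// Limits is the hypothetical value type validated above.
type Limits struct {
	Spent int64 `json:"spent"`
	Limit int64 `json:"limit"`
}
```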
+func putNestedValue(m map[string]any, k []string, v any) error { + if len(k) == 0 { + return nil + } + + key := k[0] + rest := k[1:] + + // Array case + if arrayKeyWithoutSubscript, index, hasSubscript, err := extractArraySubscript(key); err != nil { + return fmt.Errorf("error parsing subscript for key %v: %w", key, err) + } else if hasSubscript { + key = arrayKeyWithoutSubscript + var arrayDestination []any + if existing, ok := m[key]; !ok { + arrayDestination = make([]any, index+1) + } else if existing, ok := existing.([]any); !ok { + // Error case. Existing isn't of correct type. Can happen if + // someone is subscripting a field that was previously not an array + return fmt.Errorf("expected []any at key %v, got %T", key, existing) + } else if index >= len(existing) { + // Ensure array is big enough + arrayDestination = append(existing, make([]any, index-len(existing)+1)...) + } else { + arrayDestination = existing + } + + m[key] = arrayDestination + if arrayDestination[index] == nil { + // Doesn't exist case, create the destination. + // Assumes the destination is a map for now. Theoretically could be + // extended to support arrays of arrays, but that's not needed yet. + destination := make(map[string]any) + arrayDestination[index] = destination + if err = putNestedValue(destination, rest, v); err != nil { + return err + } + } else if dst, ok := arrayDestination[index].(map[string]any); ok { + // Already exists case, correct type + if putNestedValue(dst, rest, v); err != nil { + return err + } + } else { + // Already exists, incorrect type. Error + // This shouldn't be possible. + return fmt.Errorf("expected map at %v[%v], got %T", key, index, arrayDestination[index]) + } + + return nil + } else if len(rest) == 0 { + // Base case. Single key. Just set into destination + m[key] = v + return nil + } + + if existing, ok := m[key]; !ok { + destination := make(map[string]any) + m[key] = destination + return putNestedValue(destination, rest, v) + } else if destination, ok := existing.(map[string]any); ok { + return putNestedValue(destination, rest, v) + } else { + // Error case. Existing isn't of correct type. Can happen if prior comment + // referred to value as an error + return fmt.Errorf("expected map[string]any at key %v, got %T", key, existing) + } +} + +// extractArraySubscript extracts the left array subscript from a key of +// the form `foo[bar][baz]` -> "bar". +// Returns the key without the subscript, the index, and a bool indicating if +// the key had a subscript. +// If the key has a subscript, but the subscript is not a valid integer, returns an error. +// +// This can be adapted to support multidimensional subscripts probably fairly +// easily by retuning a list of ints +func extractArraySubscript(str string) (string, int, bool, error) { + subscriptIdx := strings.Index(str, "[") + if subscriptIdx == -1 { + return "", -1, false, nil + } + + subscript := strings.Split(str[subscriptIdx+1:], "]")[0] + if len(subscript) == 0 { + return "", -1, false, fmt.Errorf("empty subscript not allowed") + } + + index, err := strconv.Atoi(subscript) + if err != nil { + return "", -1, false, fmt.Errorf("expected integer index in key %v", str) + } else if index < 0 { + return "", -1, false, fmt.Errorf("subscript '%v' is invalid. 
index must be positive", subscript) + } + + return str[:subscriptIdx], index, true, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 5268285b..743f5b8b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -21,22 +21,27 @@ import ( "encoding/json" "fmt" "io" - "path/filepath" + "path" "reflect" + "regexp" "sort" "strings" - "k8s.io/gengo/generator" - "k8s.io/gengo/namer" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/generator" + "k8s.io/gengo/v2/namer" + "k8s.io/gengo/v2/types" openapi "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/klog/v2" ) // This is the comment tag that carries parameters for open API generation. const tagName = "k8s:openapi-gen" +const markerPrefix = "+k8s:validation:" const tagOptional = "optional" +const tagRequired = "required" const tagDefault = "default" // Known values for the tag. @@ -53,11 +58,11 @@ var tempPatchTags = [...]string{ } func getOpenAPITagValue(comments []string) []string { - return types.ExtractCommentTags("+", comments)[tagName] + return gengo.ExtractCommentTags("+", comments)[tagName] } func getSingleTagsValue(comments []string, tag string) (string, error) { - tags, ok := types.ExtractCommentTags("+", comments)[tag] + tags, ok := gengo.ExtractCommentTags("+", comments)[tag] if !ok || len(tags) == 0 { return "", nil } @@ -77,14 +82,25 @@ func hasOpenAPITagValue(comments []string, value string) bool { return false } -// hasOptionalTag returns true if the member has +optional in its comments or -// omitempty in its json tags. -func hasOptionalTag(m *types.Member) bool { - hasOptionalCommentTag := types.ExtractCommentTags( +// isOptional returns error if the member has +optional and +required in +// its comments. If +optional is present it returns true. If +required is present +// it returns false. Otherwise, it returns true if `omitempty` JSON tag is present +func isOptional(m *types.Member) (bool, error) { + hasOptionalCommentTag := gengo.ExtractCommentTags( "+", m.CommentLines)[tagOptional] != nil - hasOptionalJsonTag := strings.Contains( - reflect.StructTag(m.Tags).Get("json"), "omitempty") - return hasOptionalCommentTag || hasOptionalJsonTag + hasRequiredCommentTag := gengo.ExtractCommentTags( + "+", m.CommentLines)[tagRequired] != nil + if hasOptionalCommentTag && hasRequiredCommentTag { + return false, fmt.Errorf("member %s cannot be both optional and required", m.Name) + } else if hasRequiredCommentTag { + return false, nil + } else if hasOptionalCommentTag { + return true, nil + } + + // If neither +optional nor +required is present in the comments, + // infer optional from the json tags. + return strings.Contains(reflect.StructTag(m.Tags).Get("json"), "omitempty"), nil } func apiTypeFilterFunc(c *generator.Context, t *types.Type) bool { @@ -109,18 +125,18 @@ const ( // openApiGen produces a file with auto-generated OpenAPI functions. type openAPIGen struct { - generator.DefaultGen + generator.GoGenerator // TargetPackage is the package that will get GetOpenAPIDefinitions function returns all open API definitions. 
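The `isOptional` change above resolves optionality from three sources, in order: an explicit `+required`, an explicit `+optional`, and otherwise the presence of `omitempty` in the JSON tag; using both markers on one field is an error. A small, hypothetical illustration:

```go
package example

// WidgetSpec is a hypothetical type showing the optionality markers.
type WidgetSpec struct {
	// Replicas stays in the schema's Required list despite omitempty.
	// +required
	Replicas int32 `json:"replicas,omitempty"`

	// Paused is explicitly optional even without omitempty.
	// +optional
	Paused bool `json:"paused"`

	// Message has no marker, so omitempty alone makes it optional.
	Message string `json:"message,omitempty"`
}
```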
targetPackage string imports namer.ImportTracker } -func newOpenAPIGen(sanitizedName string, targetPackage string) generator.Generator { +func newOpenAPIGen(outputFilename string, targetPackage string) generator.Generator { return &openAPIGen{ - DefaultGen: generator.DefaultGen{ - OptionalName: sanitizedName, + GoGenerator: generator.GoGenerator{ + OutputFilename: outputFilename, }, - imports: generator.NewImportTracker(), + imports: generator.NewImportTrackerForPackage(targetPackage), targetPackage: targetPackage, } } @@ -140,16 +156,6 @@ func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems { } } -func (g *openAPIGen) isOtherPackage(pkg string) bool { - if pkg == g.targetPackage { - return false - } - if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") { - return false - } - return true -} - func (g *openAPIGen) Imports(c *generator.Context) []string { importLines := []string{} for _, singleImport := range g.imports.ImportLines() { @@ -291,7 +297,8 @@ func hasOpenAPIV3OneOfMethod(t *types.Type) bool { // typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name. func typeShortName(t *types.Type) string { - return filepath.Base(t.Name.Package) + "." + t.Name.Name + // `path` vs. `filepath` because packages use '/' + return path.Base(t.Name.Package) + "." + t.Name.Name } func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([]string, error) { @@ -314,7 +321,10 @@ func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([] if name == "" { continue } - if !hasOptionalTag(&m) { + if isOptional, err := isOptional(&m); err != nil { + klog.Errorf("Error when generating: %v, %v\n", name, m) + return required, err + } else if !isOptional { required = append(required, name) } if err = g.generateProperty(&m, t); err != nil { @@ -352,10 +362,76 @@ func (g openAPITypeWriter) generateCall(t *types.Type) error { return g.Error() } +func (g openAPITypeWriter) generateValueValidations(vs *spec.SchemaProps) error { + + if vs == nil { + return nil + } + args := generator.Args{ + "ptrTo": &types.Type{ + Name: types.Name{ + Package: "k8s.io/utils/ptr", + Name: "To", + }}, + "spec": vs, + } + if vs.Minimum != nil { + g.Do("Minimum: $.ptrTo|raw$[float64]($.spec.Minimum$),\n", args) + } + if vs.Maximum != nil { + g.Do("Maximum: $.ptrTo|raw$[float64]($.spec.Maximum$),\n", args) + } + if vs.ExclusiveMinimum { + g.Do("ExclusiveMinimum: true,\n", args) + } + if vs.ExclusiveMaximum { + g.Do("ExclusiveMaximum: true,\n", args) + } + if vs.MinLength != nil { + g.Do("MinLength: $.ptrTo|raw$[int64]($.spec.MinLength$),\n", args) + } + if vs.MaxLength != nil { + g.Do("MaxLength: $.ptrTo|raw$[int64]($.spec.MaxLength$),\n", args) + } + + if vs.MinProperties != nil { + g.Do("MinProperties: $.ptrTo|raw$[int64]($.spec.MinProperties$),\n", args) + } + if vs.MaxProperties != nil { + g.Do("MaxProperties: $.ptrTo|raw$[int64]($.spec.MaxProperties$),\n", args) + } + if len(vs.Pattern) > 0 { + p, err := json.Marshal(vs.Pattern) + if err != nil { + return err + } + g.Do("Pattern: $.$,\n", string(p)) + } + if vs.MultipleOf != nil { + g.Do("MultipleOf: $.ptrTo|raw$[float64]($.spec.MultipleOf$),\n", args) + } + if vs.MinItems != nil { + g.Do("MinItems: $.ptrTo|raw$[int64]($.spec.MinItems$),\n", args) + } + if vs.MaxItems != nil { + g.Do("MaxItems: $.ptrTo|raw$[int64]($.spec.MaxItems$),\n", args) + } + if vs.UniqueItems { + g.Do("UniqueItems: true,\n", nil) + } + + return nil +} + func (g openAPITypeWriter) generate(t *types.Type) 
error { // Only generate for struct type and ignore the rest switch t.Kind { case types.Struct: + validationSchema, err := ParseCommentTags(t, t.CommentLines, markerPrefix) + if err != nil { + return err + } + hasV2Definition := hasOpenAPIDefinitionMethod(t) hasV2DefinitionTypeAndFormat := hasOpenAPIDefinitionMethods(t) hasV3OneOfTypes := hasOpenAPIV3OneOfMethod(t) @@ -375,10 +451,17 @@ func (g openAPITypeWriter) generate(t *types.Type) error { "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ - "},\n"+ - "},\n"+ - "})\n}\n\n", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) + err = g.generateValueValidations(&validationSchema.SchemaProps) + if err != nil { + return err + } + g.Do("},\n", nil) + if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil { + return err + } + g.Do("},\n", nil) + g.Do("})\n}\n\n", args) return nil case hasV2DefinitionTypeAndFormat && hasV3OneOfTypes: // generate v3 def. @@ -387,20 +470,34 @@ func (g openAPITypeWriter) generate(t *types.Type) error { "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("OneOf:common.GenerateOpenAPIV3OneOfSchema($.type|raw${}.OpenAPIV3OneOfTypes()),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ - "},\n"+ - "},\n"+ - "},", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) + err = g.generateValueValidations(&validationSchema.SchemaProps) + if err != nil { + return err + } + g.Do("},\n", nil) + if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil { + return err + } + g.Do("},\n", nil) + g.Do("},", args) // generate v2 def. g.Do("$.OpenAPIDefinition|raw${\n"+ "Schema: spec.Schema{\n"+ "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ - "},\n"+ - "},\n"+ - "})\n}\n\n", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) + err = g.generateValueValidations(&validationSchema.SchemaProps) + if err != nil { + return err + } + g.Do("},\n", nil) + if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil { + return err + } + g.Do("},\n", nil) + g.Do("})\n}\n\n", args) return nil case hasV2DefinitionTypeAndFormat: g.Do("return $.OpenAPIDefinition|raw${\n"+ @@ -408,18 +505,30 @@ func (g openAPITypeWriter) generate(t *types.Type) error { "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ - "},\n"+ - "},\n"+ - "}\n}\n\n", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) + err = g.generateValueValidations(&validationSchema.SchemaProps) + if err != nil { + return err + } + g.Do("},\n", nil) + if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil { + return err + } + g.Do("},\n", nil) + g.Do("}\n}\n\n", args) return nil case hasV3OneOfTypes: // having v3 oneOf types without custom v2 type or format does not make sense. 
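The validations collected above are rendered into the generated `spec.SchemaProps` via `k8s.io/utils/ptr`. As a rough sketch of the shape produced for a string field tagged `+k8s:validation:maxLength=63` plus a pattern (not verbatim generator output):

```go
package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/spec"
	"k8s.io/utils/ptr"
)

func main() {
	schema := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "name of the pool",
			Type:        []string{"string"},
			MaxLength:   ptr.To[int64](63),
			Pattern:     "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$",
		},
	}
	fmt.Println(*schema.MaxLength, schema.Pattern)
}
```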
return fmt.Errorf("type %q has v3 one of types but not v2 type or format", t.Name) } + g.Do("return $.OpenAPIDefinition|raw${\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type: []string{\"object\"},\n", nil) + err = g.generateValueValidations(&validationSchema.SchemaProps) + if err != nil { + return err + } // write members into a temporary buffer, in order to postpone writing out the Properties field. We only do // that if it is not empty. @@ -440,7 +549,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error { g.Do("Required: []string{\"$.$\"},\n", strings.Join(required, "\",\"")) } g.Do("},\n", nil) - if err := g.generateStructExtensions(t); err != nil { + if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil { return err } g.Do("},\n", nil) @@ -473,7 +582,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error { return nil } -func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { +func (g openAPITypeWriter) generateStructExtensions(t *types.Type, otherExtensions map[string]interface{}) error { extensions, errors := parseExtensions(t.CommentLines) // Initially, we will only log struct extension errors. if len(errors) > 0 { @@ -489,11 +598,11 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { } // TODO(seans3): Validate struct extensions here. - g.emitExtensions(extensions, unions) + g.emitExtensions(extensions, unions, otherExtensions) return nil } -func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type) error { +func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type, otherExtensions map[string]interface{}) error { extensions, parseErrors := parseExtensions(m.CommentLines) validationErrors := validateMemberExtensions(extensions, m) errors := append(parseErrors, validationErrors...) @@ -504,13 +613,13 @@ func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *typ klog.V(2).Infof("%s %s\n", errorPrefix, e) } } - g.emitExtensions(extensions, nil) + g.emitExtensions(extensions, nil, otherExtensions) return nil } -func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union) { +func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union, otherExtensions map[string]interface{}) { // If any extensions exist, then emit code to create them. 
- if len(extensions) == 0 && len(unions) == 0 { + if len(extensions) == 0 && len(unions) == 0 && len(otherExtensions) == 0 { return } g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil) @@ -533,6 +642,16 @@ func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union } g.Do("},\n", nil) } + + if len(otherExtensions) > 0 { + for k, v := range otherExtensions { + g.Do("$.key$: $.value$,\n", map[string]interface{}{ + "key": fmt.Sprintf("%#v", k), + "value": fmt.Sprintf("%#v", v), + }) + } + } + g.Do("},\n},\n", nil) } @@ -553,23 +672,108 @@ func (g openAPITypeWriter) validatePatchTags(m *types.Member, parent *types.Type return nil } -func defaultFromComments(comments []string) (interface{}, error) { - tag, err := getSingleTagsValue(comments, tagDefault) +func defaultFromComments(comments []string, commentPath string, t *types.Type) (interface{}, *types.Name, error) { + var tag string + + for { + var err error + tag, err = getSingleTagsValue(comments, tagDefault) + if err != nil { + return nil, nil, err + } + + if t == nil || len(tag) > 0 { + break + } + + comments = t.CommentLines + commentPath = t.Name.Package + switch t.Kind { + case types.Pointer: + t = t.Elem + case types.Alias: + t = t.Underlying + default: + t = nil + } + } + if tag == "" { - return nil, err + return nil, nil, nil } + var i interface{} - if err := json.Unmarshal([]byte(tag), &i); err != nil { - return nil, fmt.Errorf("failed to unmarshal default: %v", err) + if id, ok := parseSymbolReference(tag, commentPath); ok { + klog.Errorf("%v, %v", id, commentPath) + return nil, &id, nil + } else if err := json.Unmarshal([]byte(tag), &i); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal default: %v", err) + } + return i, nil, nil +} + +var refRE = regexp.MustCompile(`^ref\((?P[^"]+)\)$`) +var refREIdentIndex = refRE.SubexpIndex("reference") + +// parseSymbolReference looks for strings that match one of the following: +// - ref(Ident) +// - ref(pkgpath.Ident) +// If the input string matches either of these, it will return the (optional) +// pkgpath, the Ident, and true. Otherwise it will return empty strings and +// false. +// +// This is borrowed from k8s.io/code-generator. +func parseSymbolReference(s, sourcePackage string) (types.Name, bool) { + matches := refRE.FindStringSubmatch(s) + if len(matches) < refREIdentIndex || matches[refREIdentIndex] == "" { + return types.Name{}, false + } + + contents := matches[refREIdentIndex] + name := types.ParseFullyQualifiedName(contents) + if len(name.Package) == 0 { + name.Package = sourcePackage + } + return name, true +} + +func implementsCustomUnmarshalling(t *types.Type) bool { + switch t.Kind { + case types.Pointer: + unmarshaller, isUnmarshaller := t.Elem.Methods["UnmarshalJSON"] + return isUnmarshaller && unmarshaller.Signature.Receiver.Kind == types.Pointer + case types.Struct: + _, isUnmarshaller := t.Methods["UnmarshalJSON"] + return isUnmarshaller + default: + return false } - return i, nil } func mustEnforceDefault(t *types.Type, omitEmpty bool) (interface{}, error) { + // Treat types with custom unmarshalling as a value + // (Can be alias, struct, or pointer) + if implementsCustomUnmarshalling(t) { + // Since Go JSON deserializer always feeds `null` when present + // to structs with custom UnmarshalJSON, the zero value for + // these structs is also null. + // + // In general, Kubernetes API types with custom marshalling should + // marshal their empty values to `null`. 
+ return nil, nil + } + switch t.Kind { + case types.Alias: + return mustEnforceDefault(t.Underlying, omitEmpty) case types.Pointer, types.Map, types.Slice, types.Array, types.Interface: return nil, nil case types.Struct: + if len(t.Members) == 1 && t.Members[0].Embedded { + // Treat a struct with a single embedded member the same as an alias + return mustEnforceDefault(t.Members[0].Type, omitEmpty) + } + return map[string]interface{}{}, nil case types.Builtin: if !omitEmpty { @@ -585,9 +789,8 @@ func mustEnforceDefault(t *types.Type, omitEmpty bool) (interface{}, error) { } } -func (g openAPITypeWriter) generateDefault(comments []string, t *types.Type, omitEmpty bool) error { - t = resolveAliasAndEmbeddedType(t) - def, err := defaultFromComments(comments) +func (g openAPITypeWriter) generateDefault(comments []string, t *types.Type, omitEmpty bool, commentOwningType *types.Type) error { + def, ref, err := defaultFromComments(comments, commentOwningType.Name.Package, t) if err != nil { return err } @@ -603,6 +806,8 @@ func (g openAPITypeWriter) generateDefault(comments []string, t *types.Type, omi } if def != nil { g.Do("Default: $.$,\n", fmt.Sprintf("%#v", def)) + } else if ref != nil { + g.Do("Default: $.|raw$,\n", &types.Type{Name: *ref}) } return nil } @@ -656,11 +861,15 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) if name == "" { return nil } + validationSchema, err := ParseCommentTags(m.Type, m.CommentLines, markerPrefix) + if err != nil { + return err + } if err := g.validatePatchTags(m, parent); err != nil { return err } g.Do("\"$.$\": {\n", name) - if err := g.generateMemberExtensions(m, parent); err != nil { + if err := g.generateMemberExtensions(m, parent, validationSchema.Extensions); err != nil { return err } g.Do("SchemaProps: spec.SchemaProps{\n", nil) @@ -676,9 +885,13 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) return nil } omitEmpty := strings.Contains(reflect.StructTag(m.Tags).Get("json"), "omitempty") - if err := g.generateDefault(m.CommentLines, m.Type, omitEmpty); err != nil { + if err := g.generateDefault(m.CommentLines, m.Type, omitEmpty, parent); err != nil { return fmt.Errorf("failed to generate default in %v: %v: %v", parent, m.Name, err) } + err = g.generateValueValidations(&validationSchema.SchemaProps) + if err != nil { + return err + } t := resolveAliasAndPtrType(m.Type) // If we can get a openAPI type and format for this type, we consider it to be simple property typeString, format := openapi.OpenAPITypeFormat(t.String()) @@ -686,7 +899,7 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) g.generateSimpleProperty(typeString, format) if enumType, isEnum := g.enumContext.EnumType(m.Type); isEnum { // original type is an enum, add "Enum: " and the values - g.Do("Enum: []interface{}{$.$}", strings.Join(enumType.ValueStrings(), ", ")) + g.Do("Enum: []interface{}{$.$},\n", strings.Join(enumType.ValueStrings(), ", ")) } g.Do("},\n},\n", nil) return nil @@ -721,22 +934,6 @@ func (g openAPITypeWriter) generateReferenceProperty(t *types.Type) { g.Do("Ref: ref(\"$.$\"),\n", t.Name.String()) } -func resolveAliasAndEmbeddedType(t *types.Type) *types.Type { - var prev *types.Type - for prev != t { - prev = t - if t.Kind == types.Alias { - t = t.Underlying - } - if t.Kind == types.Struct { - if len(t.Members) == 1 && t.Members[0].Embedded { - t = t.Members[0].Type - } - } - } - return t -} - func resolveAliasAndPtrType(t *types.Type) *types.Type { var prev 
*types.Type for prev != t { @@ -762,12 +959,16 @@ func (g openAPITypeWriter) generateMapProperty(t *types.Type) error { g.Do("Type: []string{\"object\"},\n", nil) g.Do("AdditionalProperties: &spec.SchemaOrBool{\nAllows: true,\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) - if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false); err != nil { + if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false, t.Elem); err != nil { return err } typeString, format := openapi.OpenAPITypeFormat(elemType.String()) if typeString != "" { g.generateSimpleProperty(typeString, format) + if enumType, isEnum := g.enumContext.EnumType(t.Elem); isEnum { + // original type is an enum, add "Enum: " and the values + g.Do("Enum: []interface{}{$.$},\n", strings.Join(enumType.ValueStrings(), ", ")) + } g.Do("},\n},\n},\n", nil) return nil } @@ -795,12 +996,16 @@ func (g openAPITypeWriter) generateSliceProperty(t *types.Type) error { elemType := resolveAliasAndPtrType(t.Elem) g.Do("Type: []string{\"array\"},\n", nil) g.Do("Items: &spec.SchemaOrArray{\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) - if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false); err != nil { + if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false, t.Elem); err != nil { return err } typeString, format := openapi.OpenAPITypeFormat(elemType.String()) if typeString != "" { g.generateSimpleProperty(typeString, format) + if enumType, isEnum := g.enumContext.EnumType(t.Elem); isEnum { + // original type is an enum, add "Enum: " and the values + g.Do("Enum: []interface{}{$.$},\n", strings.Join(enumType.ValueStrings(), ", ")) + } g.Do("},\n},\n},\n", nil) return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go index 474d79e8..e4b0f7cd 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go @@ -1,7 +1,8 @@ package rules import ( - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/types" ) const ListTypeIDLTag = "listType" @@ -24,7 +25,7 @@ func (l *ListTypeMissing) Validate(t *types.Type) ([]string, error) { switch t.Kind { case types.Struct: for _, m := range t.Members { - hasListType := types.ExtractCommentTags("+", m.CommentLines)[ListTypeIDLTag] != nil + hasListType := gengo.ExtractCommentTags("+", m.CommentLines)[ListTypeIDLTag] != nil if m.Name == "Items" && m.Type.Kind == types.Slice && hasNamedMember(t, "ListMeta") { if hasListType { @@ -33,7 +34,8 @@ func (l *ListTypeMissing) Validate(t *types.Type) ([]string, error) { continue } - if m.Type.Kind == types.Slice && !hasListType { + // All slice fields must have a list-type tag except []byte + if m.Type.Kind == types.Slice && m.Type.Elem != types.Byte && !hasListType { fields = append(fields, m.Name) continue } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go index 9dfbc319..af30edc5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go @@ -22,7 +22,7 @@ import ( "k8s.io/kube-openapi/pkg/util/sets" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/types" ) var ( @@ -56,24 +56,29 @@ Go field names must be CamelCase. JSON field names must be camelCase. Other than initial letter, the two should almost always match. No underscores nor dashes in either. 
This rule verifies the convention "Other than capitalization of the initial letter, the two should almost always match." Examples (also in unit test): - Go name | JSON name | match - podSpec false - PodSpec podSpec true - PodSpec PodSpec false - podSpec podSpec false - PodSpec spec false - Spec podSpec false - JSONSpec jsonSpec true - JSONSpec jsonspec false - HTTPJSONSpec httpJSONSpec true + + Go name | JSON name | match + podSpec false + PodSpec podSpec true + PodSpec PodSpec false + podSpec podSpec false + PodSpec spec false + Spec podSpec false + JSONSpec jsonSpec true + JSONSpec jsonspec false + HTTPJSONSpec httpJSONSpec true + NOTE: this validator cannot tell two sequential all-capital words from one word, therefore the case below is also considered matched. - HTTPJSONSpec httpjsonSpec true + + HTTPJSONSpec httpjsonSpec true + NOTE: JSON names in jsonNameBlacklist should skip evaluation - true - podSpec true - podSpec - true - podSpec metadata true + + true + podSpec true + podSpec - true + podSpec metadata true */ type NamesMatch struct{} @@ -114,14 +119,15 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) { // namesMatch evaluates if goName and jsonName match the API rule // TODO: Use an off-the-shelf CamelCase solution instead of implementing this logic. The following existing -// packages have been tried out: -// github.com/markbates/inflect -// github.com/segmentio/go-camelcase -// github.com/iancoleman/strcase -// github.com/fatih/camelcase -// Please see https://github.com/kubernetes/kube-openapi/pull/83#issuecomment-400842314 for more details -// about why they don't satisfy our need. What we need can be a function that detects an acronym at the -// beginning of a string. +// +// packages have been tried out: +// github.com/markbates/inflect +// github.com/segmentio/go-camelcase +// github.com/iancoleman/strcase +// github.com/fatih/camelcase +// Please see https://github.com/kubernetes/kube-openapi/pull/83#issuecomment-400842314 for more details +// about why they don't satisfy our need. What we need can be a function that detects an acronym at the +// beginning of a string. func namesMatch(goName, jsonName string) bool { if jsonNameBlacklist.Has(jsonName) { return true @@ -129,7 +135,7 @@ func namesMatch(goName, jsonName string) bool { if !isAllowedName(goName) || !isAllowedName(jsonName) { return false } - if strings.ToLower(goName) != strings.ToLower(jsonName) { + if !strings.EqualFold(goName, jsonName) { return false } // Go field names must be CamelCase. JSON field names must be camelCase. diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go index dd37ad8a..d8387596 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go @@ -20,7 +20,7 @@ import ( "reflect" "strings" - "k8s.io/gengo/types" + "k8s.io/gengo/v2/types" ) // OmitEmptyMatchCase implements APIRule interface. 
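Aside on the names_match.go hunk above: swapping the paired strings.ToLower calls for strings.EqualFold keeps the same case-insensitive comparison while avoiding two temporary lowercased strings. A minimal standalone sketch of that equivalence, assuming the ASCII identifiers this rule inspects (illustrative only, not the vendored rule itself):

package main

import (
	"fmt"
	"strings"
)

func main() {
	goName, jsonName := "HTTPJSONSpec", "httpJSONSpec"

	// Old comparison: allocates two lowercased copies before comparing.
	oldMatch := strings.ToLower(goName) == strings.ToLower(jsonName)

	// New comparison: EqualFold folds case while walking both strings,
	// with no intermediate allocations; for ASCII input the results agree.
	newMatch := strings.EqualFold(goName, jsonName)

	fmt.Println(oldMatch, newMatch) // true true
}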
diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/union.go b/vendor/k8s.io/kube-openapi/pkg/generators/union.go index a0281fe4..bfcba1ad 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/union.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/union.go @@ -20,7 +20,8 @@ import ( "fmt" "sort" - "k8s.io/gengo/types" + "k8s.io/gengo/v2" + "k8s.io/gengo/v2/types" ) const tagUnionMember = "union" @@ -141,7 +142,7 @@ func parseEmbeddedUnion(t *types.Type) ([]union, []error) { // embedded types. func parseUnionStruct(t *types.Type) (*union, []error) { errors := []error{} - if types.ExtractCommentTags("+", t.CommentLines)[tagUnionMember] == nil { + if gengo.ExtractCommentTags("+", t.CommentLines)[tagUnionMember] == nil { return nil, nil } @@ -156,14 +157,14 @@ func parseUnionStruct(t *types.Type) (*union, []error) { errors = append(errors, fmt.Errorf("union structures can't have embedded fields: %v.%v", t.Name, m.Name)) continue } - if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { + if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { errors = append(errors, fmt.Errorf("union struct can't have unionDeprecated members: %v.%v", t.Name, m.Name)) continue } - if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { + if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { errors = append(errors, u.setDiscriminator(jsonName)...) } else { - if !hasOptionalTag(&m) { + if optional, err := isOptional(&m); !optional || err != nil { errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name)) } u.addMember(jsonName, m.Name) @@ -186,15 +187,15 @@ func parseUnionMembers(t *types.Type) (*union, []error) { if shouldInlineMembers(&m) { continue } - if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { + if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil { errors = append(errors, u.setDiscriminator(jsonName)...) 
} - if types.ExtractCommentTags("+", m.CommentLines)[tagUnionMember] != nil { + if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionMember] != nil { errors = append(errors, fmt.Errorf("union tag is not accepted on struct members: %v.%v", t.Name, m.Name)) continue } - if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { - if !hasOptionalTag(&m) { + if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil { + if optional, err := isOptional(&m); !optional || err != nil { errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name)) } u.addMember(jsonName, m.Name) diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 7a2590dc..5fc62977 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -18,36 +18,31 @@ package handler import ( "bytes" - "compress/gzip" "crypto/sha512" - "encoding/json" "fmt" - "mime" "net/http" "strconv" - "sync" "time" "github.com/NYTimes/gziphandler" "github.com/emicklei/go-restful/v3" "github.com/golang/protobuf/proto" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" + "github.com/google/uuid" "github.com/munnerz/goautoneg" + klog "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/builder" + "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" "k8s.io/kube-openapi/pkg/common/restfuladapter" - "k8s.io/kube-openapi/pkg/internal/handler" "k8s.io/kube-openapi/pkg/validation/spec" ) const ( - jsonExt = ".json" - - mimeJson = "application/json" - // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. - mimePb = "application/com.github.googleapis.gnostic.OpenAPIv2@68f4ded+protobuf" - mimePbGz = "application/x-gzip" + subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v2@v1.0+protobuf" + subTypeProtobuf = "com.github.proto-openapi.spec.v2.v1.0+protobuf" + subTypeJSON = "json" ) func computeETag(data []byte) string { @@ -57,87 +52,62 @@ func computeETag(data []byte) string { return fmt.Sprintf("%X", sha512.Sum512(data)) } -// OpenAPIService is the service responsible for serving OpenAPI spec. It has -// the ability to safely change the spec while serving it. -type OpenAPIService struct { - // rwMutex protects All members of this service. - rwMutex sync.RWMutex - +type timedSpec struct { + spec []byte lastModified time.Time - - jsonCache handler.HandlerCache - protoCache handler.HandlerCache - etagCache handler.HandlerCache } -func init() { - mime.AddExtensionType(".json", mimeJson) - mime.AddExtensionType(".pb-v1", mimePb) - mime.AddExtensionType(".gz", mimePbGz) +// OpenAPIService is the service responsible for serving OpenAPI spec. It has +// the ability to safely change the spec while serving it. +type OpenAPIService struct { + specCache cached.LastSuccess[*spec.Swagger] + jsonCache cached.Value[timedSpec] + protoCache cached.Value[timedSpec] } // NewOpenAPIService builds an OpenAPIService starting with the given spec. 
-func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { - o := &OpenAPIService{} - if err := o.UpdateSpec(spec); err != nil { - return nil, err - } - return o, nil +func NewOpenAPIService(swagger *spec.Swagger) *OpenAPIService { + return NewOpenAPIServiceLazy(cached.Static(swagger, uuid.New().String())) } -func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - specBytes, err := o.jsonCache.Get() - if err != nil { - return nil, "", time.Time{}, err - } - etagBytes, err := o.etagCache.Get() - if err != nil { - return nil, "", time.Time{}, err - } - return specBytes, string(etagBytes), o.lastModified, nil -} - -func (o *OpenAPIService) getSwaggerPbBytes() ([]byte, string, time.Time, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - specPb, err := o.protoCache.Get() - if err != nil { - return nil, "", time.Time{}, err - } - etagBytes, err := o.etagCache.Get() - if err != nil { - return nil, "", time.Time{}, err - } - return specPb, string(etagBytes), o.lastModified, nil -} +// NewOpenAPIServiceLazy builds an OpenAPIService from lazy spec. +func NewOpenAPIServiceLazy(swagger cached.Value[*spec.Swagger]) *OpenAPIService { + o := &OpenAPIService{} + o.UpdateSpecLazy(swagger) -func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - o.jsonCache = o.jsonCache.New(func() ([]byte, error) { - return json.Marshal(openapiSpec) - }) - o.protoCache = o.protoCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() + o.jsonCache = cached.Transform[*spec.Swagger](func(spec *spec.Swagger, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err + } + json, err := spec.MarshalJSON() + if err != nil { + return timedSpec{}, "", err + } + return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil + }, &o.specCache) + o.protoCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { if err != nil { - return nil, err + return timedSpec{}, "", err } - return ToProtoBinary(json) - }) - o.etagCache = o.etagCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() + proto, err := ToProtoBinary(ts.spec) if err != nil { - return nil, err + return timedSpec{}, "", err } - return []byte(computeETag(json)), nil - }) - o.lastModified = time.Now() + // We can re-use the same etag as json because of the Vary header. + return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil + }, o.jsonCache) + return o +} +func (o *OpenAPIService) UpdateSpec(swagger *spec.Swagger) error { + o.UpdateSpecLazy(cached.Static(swagger, uuid.New().String())) return nil } +func (o *OpenAPIService) UpdateSpecLazy(swagger cached.Value[*spec.Swagger]) { + o.specCache.Store(swagger) +} + func ToProtoBinary(json []byte) ([]byte, error) { document, err := openapi_v2.ParseDocument(json) if err != nil { @@ -146,34 +116,26 @@ func ToProtoBinary(json []byte) ([]byte, error) { return proto.Marshal(document) } -func toGzip(data []byte) []byte { - var buf bytes.Buffer - zw := gzip.NewWriter(&buf) - zw.Write(data) - zw.Close() - return buf.Bytes() -} - // RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. // // Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead. 
-func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) { - o, err := NewOpenAPIService(spec) - if err != nil { - return nil, err - } - return o, o.RegisterOpenAPIVersionedService(servePath, handler) +func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) *OpenAPIService { + o := NewOpenAPIService(spec) + o.RegisterOpenAPIVersionedService(servePath, handler) + return o } // RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. -func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error { +func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) { accepted := []struct { - Type string - SubType string - GetDataAndETag func() ([]byte, string, time.Time, error) + Type string + SubType string + ReturnedContentType string + GetDataAndEtag cached.Value[timedSpec] }{ - {"application", "json", o.getSwaggerBytes}, - {"application", "com.github.proto-openapi.spec.v2@v1.0+protobuf", o.getSwaggerPbBytes}, + {"application", subTypeJSON, "application/" + subTypeJSON, o.jsonCache}, + {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf, o.protoCache}, + {"application", subTypeProtobuf, "application/" + subTypeProtobuf, o.protoCache}, } handler.Handle(servePath, gziphandler.GzipHandler(http.HandlerFunc( @@ -192,21 +154,23 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl if clause.SubType != accepts.SubType && clause.SubType != "*" { continue } - // serve the first matching media type in the sorted clause list - data, etag, lastModified, err := accepts.GetDataAndETag() + ts, etag, err := accepts.GetDataAndEtag.Get() if err != nil { klog.Errorf("Error in OpenAPI handler: %s", err) // only return a 503 if we have no older cache data to serve - if data == nil { + if ts.spec == nil { w.WriteHeader(http.StatusServiceUnavailable) return } } + // Set Content-Type header in the reponse + w.Header().Set("Content-Type", accepts.ReturnedContentType) + // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag w.Header().Set("Etag", strconv.Quote(etag)) // ServeContent will take care of caching using eTag. - http.ServeContent(w, r, servePath, lastModified, bytes.NewReader(data)) + http.ServeContent(w, r, servePath, ts.lastModified, bytes.NewReader(ts.spec)) return } } @@ -215,8 +179,6 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl return }), )) - - return nil } // BuildAndRegisterOpenAPIVersionedService builds the spec and registers a handler to provide access to it. 
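The handler.go rewrite above keeps the serving contract intact: spec bytes are still identified by an upper-case hex SHA-512 ETag, the ETag is quoted before being written, a Content-Type header is now set explicitly, and http.ServeContent answers conditional requests. A compact sketch of that serving path, using only the standard library and hardcoding the JSON media type (a simplified illustration under those assumptions, not the vendored handler):

package handlerdemo

import (
	"bytes"
	"crypto/sha512"
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// computeETag mirrors the vendored helper: upper-case hex of SHA-512 over the spec bytes.
func computeETag(data []byte) string {
	if data == nil {
		return ""
	}
	return fmt.Sprintf("%X", sha512.Sum512(data))
}

// serveSpec writes a JSON spec with a quoted ETag and lets ServeContent
// handle If-None-Match / If-Modified-Since on our behalf.
func serveSpec(w http.ResponseWriter, r *http.Request, specJSON []byte, lastModified time.Time) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Etag", strconv.Quote(computeETag(specJSON)))
	http.ServeContent(w, r, "", lastModified, bytes.NewReader(specJSON))
}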
@@ -234,9 +196,7 @@ func BuildAndRegisterOpenAPIVersionedServiceFromRoutes(servePath string, routeCo if err != nil { return nil, err } - o, err := NewOpenAPIService(spec) - if err != nil { - return nil, err - } - return o, o.RegisterOpenAPIVersionedService(servePath, handler) + o := NewOpenAPIService(spec) + o.RegisterOpenAPIVersionedService(servePath, handler) + return o, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index ec4adcde..fc456348 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -21,35 +21,29 @@ import ( "crypto/sha512" "encoding/json" "fmt" - "mime" "net/http" "net/url" "path" - "sort" "strconv" "strings" "sync" "time" "github.com/golang/protobuf/proto" - openapi_v3 "github.com/google/gnostic/openapiv3" + openapi_v3 "github.com/google/gnostic-models/openapiv3" + "github.com/google/uuid" "github.com/munnerz/goautoneg" + + "k8s.io/klog/v2" + "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/internal/handler" "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" ) const ( - jsonExt = ".json" - - mimeJson = "application/json" - // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. - mimePb = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf" - mimePbGz = "application/x-gzip" - - subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf" - subTypeJSON = "json" + subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf" + subTypeProtobuf = "com.github.proto-openapi.spec.v3.v1.0+protobuf" + subTypeJSON = "json" ) // OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3 @@ -65,29 +59,63 @@ type OpenAPIV3DiscoveryGroupVersion struct { ServerRelativeURL string `json:"serverRelativeURL"` } -// OpenAPIService is the service responsible for serving OpenAPI spec. It has -// the ability to safely change the spec while serving it. -type OpenAPIService struct { - // rwMutex protects All members of this service. - rwMutex sync.RWMutex +func ToV3ProtoBinary(json []byte) ([]byte, error) { + document, err := openapi_v3.ParseDocument(json) + if err != nil { + return nil, err + } + return proto.Marshal(document) +} + +type timedSpec struct { + spec []byte lastModified time.Time - v3Schema map[string]*OpenAPIV3Group } -type OpenAPIV3Group struct { - rwMutex sync.RWMutex +// This type is protected by the lock on OpenAPIService. 
+type openAPIV3Group struct { + specCache cached.LastSuccess[*spec3.OpenAPI] + pbCache cached.Value[timedSpec] + jsonCache cached.Value[timedSpec] +} - lastModified time.Time +func newOpenAPIV3Group() *openAPIV3Group { + o := &openAPIV3Group{} + o.jsonCache = cached.Transform[*spec3.OpenAPI](func(spec *spec3.OpenAPI, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err + } + json, err := json.Marshal(spec) + if err != nil { + return timedSpec{}, "", err + } + return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil + }, &o.specCache) + o.pbCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err + } + proto, err := ToV3ProtoBinary(ts.spec) + if err != nil { + return timedSpec{}, "", err + } + return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil + }, o.jsonCache) + return o +} - pbCache handler.HandlerCache - jsonCache handler.HandlerCache - etagCache handler.HandlerCache +func (o *openAPIV3Group) UpdateSpec(openapi cached.Value[*spec3.OpenAPI]) { + o.specCache.Store(openapi) } -func init() { - mime.AddExtensionType(".json", mimeJson) - mime.AddExtensionType(".pb-v1", mimePb) - mime.AddExtensionType(".gz", mimePbGz) +// OpenAPIService is the service responsible for serving OpenAPI spec. It has +// the ability to safely change the spec while serving it. +type OpenAPIService struct { + // Mutex protects the schema map. + mutex sync.Mutex + v3Schema map[string]*openAPIV3Group + + discoveryCache cached.LastSuccess[timedSpec] } func computeETag(data []byte) string { @@ -106,92 +134,90 @@ func constructServerRelativeURL(gvString, etag string) string { } // NewOpenAPIService builds an OpenAPIService starting with the given spec. -func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) { +func NewOpenAPIService() *OpenAPIService { o := &OpenAPIService{} - o.v3Schema = make(map[string]*OpenAPIV3Group) - return o, nil + o.v3Schema = make(map[string]*openAPIV3Group) + // We're not locked because we haven't shared the structure yet. 
+ o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) + return o } -func (o *OpenAPIService) getGroupBytes() ([]byte, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() - keys := make([]string, len(o.v3Schema)) - i := 0 - for k := range o.v3Schema { - keys[i] = k - i++ +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] { + caches := make(map[string]cached.Value[timedSpec], len(o.v3Schema)) + for gvName, group := range o.v3Schema { + caches[gvName] = group.jsonCache } - - sort.Strings(keys) - discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} - for gvString, groupVersion := range o.v3Schema { - etagBytes, err := groupVersion.etagCache.Get() - if err != nil { - return nil, err + return cached.Merge(func(results map[string]cached.Result[timedSpec]) (timedSpec, string, error) { + discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} + for gvName, result := range results { + if result.Err != nil { + return timedSpec{}, "", result.Err + } + discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ + ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), + } } - discovery.Paths[gvString] = OpenAPIV3DiscoveryGroupVersion{ - ServerRelativeURL: constructServerRelativeURL(gvString, string(etagBytes)), + j, err := json.Marshal(discovery) + if err != nil { + return timedSpec{}, "", err } - } - j, err := json.Marshal(discovery) - if err != nil { - return nil, err - } - return j, nil + return timedSpec{spec: j, lastModified: time.Now()}, computeETag(j), nil + }, caches) } func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]byte, string, time.Time, error) { - o.rwMutex.RLock() - defer o.rwMutex.RUnlock() + o.mutex.Lock() + defer o.mutex.Unlock() v, ok := o.v3Schema[group] if !ok { return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) } - if getType == subTypeJSON { - specBytes, err := v.jsonCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specBytes, string(etagBytes), v.lastModified, err - } else if getType == subTypeProtobuf { - specPb, err := v.pbCache.Get() - if err != nil { - return nil, "", v.lastModified, err - } - etagBytes, err := v.etagCache.Get() - return specPb, string(etagBytes), v.lastModified, err + switch getType { + case subTypeJSON: + ts, etag, err := v.jsonCache.Get() + return ts.spec, etag, ts.lastModified, err + case subTypeProtobuf, subTypeProtobufDeprecated: + ts, etag, err := v.pbCache.Get() + return ts.spec, etag, ts.lastModified, err + default: + return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } - return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } -func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - +// UpdateGroupVersionLazy adds or updates an existing group with the new cached. +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Value[*spec3.OpenAPI]) { + o.mutex.Lock() + defer o.mutex.Unlock() if _, ok := o.v3Schema[group]; !ok { - o.v3Schema[group] = &OpenAPIV3Group{} + o.v3Schema[group] = newOpenAPIV3Group() + // Since there is a new item, we need to re-build the cache map. 
+ o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } - return o.v3Schema[group].UpdateSpec(openapi) + o.v3Schema[group].UpdateSpec(openapi) +} + +func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { + o.UpdateGroupVersionLazy(group, cached.Static(openapi, uuid.New().String())) } func (o *OpenAPIService) DeleteGroupVersion(group string) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() + o.mutex.Lock() + defer o.mutex.Unlock() delete(o.v3Schema, group) + // Rebuild the merge cache map since the items have changed. + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } -func ToV3ProtoBinary(json []byte) ([]byte, error) { - document, err := openapi_v3.ParseDocument(json) +func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { + ts, etag, err := o.discoveryCache.Get() if err != nil { - return nil, err + klog.Errorf("Error serving discovery: %s", err) + w.WriteHeader(http.StatusInternalServerError) + return } - return proto.Marshal(document) -} - -func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - data, _ := o.getGroupBytes() - http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data)) + w.Header().Set("Etag", strconv.Quote(etag)) + w.Header().Set("Content-Type", "application/json") + http.ServeContent(w, r, "/openapi/v3", ts.lastModified, bytes.NewReader(ts.spec)) } func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { @@ -210,11 +236,13 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque } accepted := []struct { - Type string - SubType string + Type string + SubType string + ReturnedContentType string }{ - {"application", subTypeJSON}, - {"application", subTypeProtobuf}, + {"application", subTypeJSON, "application/" + subTypeJSON}, + {"application", subTypeProtobuf, "application/" + subTypeProtobuf}, + {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf}, } for _, clause := range clauses { @@ -229,6 +257,9 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque if err != nil { return } + // Set Content-Type header in the reponse + w.Header().Set("Content-Type", accepts.ReturnedContentType) + // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag w.Header().Set("Etag", strconv.Quote(etag)) @@ -262,30 +293,3 @@ func (o *OpenAPIService) RegisterOpenAPIV3VersionedService(servePath string, han handler.HandlePrefix(servePath+"/", http.HandlerFunc(o.HandleGroupVersion)) return nil } - -func (o *OpenAPIV3Group) UpdateSpec(openapi *spec3.OpenAPI) (err error) { - o.rwMutex.Lock() - defer o.rwMutex.Unlock() - - o.jsonCache = o.jsonCache.New(func() ([]byte, error) { - return json.Marshal(openapi) - }) - o.pbCache = o.pbCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return ToV3ProtoBinary(json) - }) - // TODO: This forces a json marshal of corresponding group-versions. - // We should look to replace this with a faster hashing mechanism. 
- o.etagCache = o.etagCache.New(func() ([]byte, error) { - json, err := o.jsonCache.Get() - if err != nil { - return nil, err - } - return []byte(computeETag(json)), nil - }) - o.lastModified = time.Now() - return nil -} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go index 3ff3c8d8..da5485f6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -18,3 +18,8 @@ package internal // Used by tests to selectively disable experimental JSON unmarshaler var UseOptimizedJSONUnmarshaling bool = true +var UseOptimizedJSONUnmarshalingV3 bool = true + +// Used by tests to selectively disable experimental JSON marshaler +var UseOptimizedJSONMarshaling bool = true +var UseOptimizedJSONMarshalingV3 bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go b/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go deleted file mode 100644 index e128c26e..00000000 --- a/vendor/k8s.io/kube-openapi/pkg/internal/handler/handler_cache.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package handler - -import ( - "sync" -) - -// HandlerCache represents a lazy cache for generating a byte array -// It is used to lazily marshal OpenAPI v2/v3 and lazily generate the ETag -type HandlerCache struct { - BuildCache func() ([]byte, error) - once sync.Once - bytes []byte - err error -} - -// Get either returns the cached value or calls BuildCache() once before caching and returning -// its results. If BuildCache returns an error, the last valid value for the cache (from prior -// calls to New()) is used instead if possible. -func (c *HandlerCache) Get() ([]byte, error) { - c.once.Do(func() { - bytes, err := c.BuildCache() - // if there is an error updating the cache, there can be situations where - // c.bytes contains a valid value (carried over from the previous update) - // but c.err is also not nil; the cache user is expected to check for this - c.err = err - if c.err == nil { - // don't override previous spec if we had an error - c.bytes = bytes - } - }) - return c.bytes, c.err -} - -// New creates a new HandlerCache for situations where a cache refresh is needed. -// This function is not thread-safe and should not be called at the same time as Get(). -func (c *HandlerCache) New(cacheBuilder func() ([]byte, error)) HandlerCache { - return HandlerCache{ - bytes: c.bytes, - BuildCache: cacheBuilder, - } -} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go new file mode 100644 index 00000000..7393bacf --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "github.com/go-openapi/jsonreference" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// DeterministicMarshal calls the jsonv2 library with the deterministic +// flag in order to have stable marshaling. +func DeterministicMarshal(in any) ([]byte, error) { + return jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in) +} + +// JSONRefFromMap populates a json reference object if the map v contains a $ref key. +func JSONRefFromMap(jsonRef *jsonreference.Ref, v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *jsonRef = ref + } + } + return nil +} + +// SanitizeExtensions sanitizes the input map such that non extension +// keys (non x-*, X-*) keys are dropped from the map. Returns the new +// modified map, or nil if the map is now empty. +func SanitizeExtensions(e map[string]interface{}) map[string]interface{} { + for k := range e { + if !IsExtensionKey(k) { + delete(e, k) + } + } + if len(e) == 0 { + e = nil + } + return e +} + +// IsExtensionKey returns true if the input string is of format x-* or X-* +func IsExtensionKey(k string) bool { + return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go index febde20f..e6c6216f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go @@ -34,6 +34,13 @@ type MarshalOptions struct { // unknown JSON object members. DiscardUnknownMembers bool + // Deterministic specifies that the same input value will be serialized + // as the exact same output bytes. Different processes of + // the same program will serialize equal values to the same bytes, + // but different versions of the same program are not guaranteed + // to produce the exact same sequence of bytes. + Deterministic bool + // formatDepth is the depth at which we respect the format flag. formatDepth int // format is custom formatting for the value at the specified depth. 
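For orientation, the new internal/serialization.go file above adds helpers that strip non-extension keys (anything not of the form x-* or X-*) from a vendor-extension map before serialization. A small standalone sketch of that filtering, re-implemented here only because the vendored helpers live in an internal package and cannot be imported directly:

package main

import "fmt"

// isExtensionKey mirrors the vendored helper: true for keys shaped like x-* or X-*.
func isExtensionKey(k string) bool {
	return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-'
}

// sanitizeExtensions drops non-extension keys in place and returns nil if nothing remains.
func sanitizeExtensions(e map[string]interface{}) map[string]interface{} {
	for k := range e {
		if !isExtensionKey(k) {
			delete(e, k)
		}
	}
	if len(e) == 0 {
		return nil
	}
	return e
}

func main() {
	ext := map[string]interface{}{
		"x-kubernetes-list-type": "map",
		"description":            "dropped: not an x-* / X-* key",
	}
	fmt.Println(sanitizeExtensions(ext)) // map[x-kubernetes-list-type:map]
}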
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go index 204d0648..c62b1f32 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go @@ -62,7 +62,7 @@ func unmarshalValueAny(uo UnmarshalOptions, dec *Decoder) (any, error) { } return dec.stringCache.make(val), nil case '0': - fv, _ := parseFloat(val, 64) // ignore error since readValue gaurantees val is valid + fv, _ := parseFloat(val, 64) // ignore error since readValue guarantees val is valid return fv, nil default: panic("BUG: invalid kind: " + k.String()) @@ -99,13 +99,32 @@ func marshalObjectAny(mo MarshalOptions, enc *Encoder, obj map[string]any) error if !enc.options.AllowInvalidUTF8 { enc.tokens.last.disableNamespace() } - for name, val := range obj { - if err := enc.WriteToken(String(name)); err != nil { - return err + if !mo.Deterministic || len(obj) <= 1 { + for name, val := range obj { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } } - if err := marshalValueAny(mo, enc, val); err != nil { - return err + } else { + names := getStrings(len(obj)) + var i int + for name := range obj { + (*names)[i] = name + i++ + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, obj[name]); err != nil { + return err + } } + putStrings(names) } if err := enc.WriteToken(ObjectEnd); err != nil { return err diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go index fcf3d500..fd26eba3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go @@ -5,6 +5,7 @@ package json import ( + "bytes" "encoding/base32" "encoding/base64" "encoding/hex" @@ -12,6 +13,7 @@ import ( "fmt" "math" "reflect" + "sort" "strconv" "sync" ) @@ -228,13 +230,7 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { } } val := enc.UnusedBuffer() - var b []byte - if va.Kind() == reflect.Array { - // TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice. 
- b = va.Slice(0, va.Len()).Bytes() - } else { - b = va.Bytes() - } + b := va.Bytes() n := len(`"`) + encodedLen(len(b)) + len(`"`) if cap(val) < n { val = make([]byte, n) @@ -248,19 +244,19 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { } unmarshalDefault := fncs.unmarshal fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { - decode, decodedLen := decodeBase64, decodedLenBase64 + decode, decodedLen, encodedLen := decodeBase64, decodedLenBase64, encodedLenBase64 if uo.format != "" && uo.formatDepth == dec.tokens.depth() { switch uo.format { case "base64": - decode, decodedLen = decodeBase64, decodedLenBase64 + decode, decodedLen, encodedLen = decodeBase64, decodedLenBase64, encodedLenBase64 case "base64url": - decode, decodedLen = decodeBase64URL, decodedLenBase64URL + decode, decodedLen, encodedLen = decodeBase64URL, decodedLenBase64URL, encodedLenBase64URL case "base32": - decode, decodedLen = decodeBase32, decodedLenBase32 + decode, decodedLen, encodedLen = decodeBase32, decodedLenBase32, encodedLenBase32 case "base32hex": - decode, decodedLen = decodeBase32Hex, decodedLenBase32Hex + decode, decodedLen, encodedLen = decodeBase32Hex, decodedLenBase32Hex, encodedLenBase32Hex case "base16", "hex": - decode, decodedLen = decodeBase16, decodedLenBase16 + decode, decodedLen, encodedLen = decodeBase16, decodedLenBase16, encodedLenBase16 case "array": uo.format = "" return unmarshalDefault(uo, dec, va) @@ -290,23 +286,28 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { n-- } n = decodedLen(n) - var b []byte + b := va.Bytes() if va.Kind() == reflect.Array { - // TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice. - b = va.Slice(0, va.Len()).Bytes() if n != len(b) { err := fmt.Errorf("decoded base64 length of %d mismatches array length of %d", n, len(b)) return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } } else { - b = va.Bytes() if b == nil || cap(b) < n { b = make([]byte, n) } else { b = b[:n] } } - if _, err := decode(b, val); err != nil { + n2, err := decode(b, val) + if err == nil && len(val) != encodedLen(n2) { + // TODO(https://go.dev/issue/53845): RFC 4648, section 3.3, + // specifies that non-alphabet characters must be rejected. + // Unfortunately, the "base32" and "base64" packages allow + // '\r' and '\n' characters by default. 
+ err = errors.New("illegal data at input byte " + strconv.Itoa(bytes.IndexAny(val, "\r\n"))) + } + if err != nil { return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } if va.Kind() == reflect.Slice { @@ -412,7 +413,7 @@ func makeUintArshaler(t reflect.Type) *arshaler { return nil } - x := math.Float64frombits(uint64(va.Uint())) + x := math.Float64frombits(va.Uint()) return enc.writeNumber(x, rawUintNumber, mo.StringifyNumbers) } fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { @@ -450,7 +451,7 @@ func makeUintArshaler(t reflect.Type) *arshaler { err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrRange) return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } - va.SetUint(uint64(n)) + va.SetUint(n) return nil } return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} @@ -549,23 +550,9 @@ func makeFloatArshaler(t reflect.Type) *arshaler { return &fncs } -var mapIterPool = sync.Pool{ - New: func() any { return new(reflect.MapIter) }, -} - -func getMapIter(mv reflect.Value) *reflect.MapIter { - iter := mapIterPool.Get().(*reflect.MapIter) - iter.Reset(mv) - return iter -} -func putMapIter(iter *reflect.MapIter) { - iter.Reset(reflect.Value{}) // allow underlying map to be garbage collected - mapIterPool.Put(iter) -} - func makeMapArshaler(t reflect.Type) *arshaler { // NOTE: The logic below disables namespaces for tracking duplicate names - // when handling map keys with a unique represention. + // when handling map keys with a unique representation. // NOTE: Values retrieved from a map are not addressable, // so we shallow copy the values to make them addressable and @@ -641,24 +628,76 @@ func makeMapArshaler(t reflect.Type) *arshaler { enc.tokens.last.disableNamespace() } - // NOTE: Map entries are serialized in a non-deterministic order. - // Users that need stable output should call RawValue.Canonicalize. - // TODO(go1.19): Remove use of a sync.Pool with reflect.MapIter. - // Calling reflect.Value.MapRange no longer allocates. - // See https://go.dev/cl/400675. - iter := getMapIter(va.Value) - defer putMapIter(iter) - for iter.Next() { - k.SetIterKey(iter) - if err := marshalKey(mko, enc, k); err != nil { - // TODO: If err is errMissingName, then wrap it as a - // SemanticError since this key type cannot be serialized - // as a JSON string. - return err + switch { + case !mo.Deterministic || n <= 1: + for iter := va.Value.MapRange(); iter.Next(); { + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + v.SetIterValue(iter) + if err := marshalVal(mo, enc, v); err != nil { + return err + } } - v.SetIterValue(iter) - if err := marshalVal(mo, enc, v); err != nil { - return err + case !nonDefaultKey && t.Key().Kind() == reflect.String: + names := getStrings(n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + k.SetIterKey(iter) + (*names)[i] = k.String() + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. 
+ k.SetString(name) + v.Set(va.MapIndex(k.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + putStrings(names) + default: + type member struct { + name string // unquoted name + key addressableValue + } + members := make([]member, n) + keys := reflect.MakeSlice(reflect.SliceOf(t.Key()), n, n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + // Marshal the member name. + k := addressableValue{keys.Index(i)} // indexed slice element is always addressable + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + name := enc.unwriteOnlyObjectMemberName() + members[i] = member{name, k} + } + // TODO: If AllowDuplicateNames is enabled, then sort according + // to reflect.Value as well if the names are equal. + // See internal/fmtsort. + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Slice(members, func(i, j int) bool { + return lessUTF16(members[i].name, members[j].name) + }) + for _, member := range members { + if err := enc.WriteToken(String(member.name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. + v.Set(va.MapIndex(member.key.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } } } } @@ -856,7 +895,7 @@ func makeStructArshaler(t reflect.Type) *arshaler { // 2. The object namespace is guaranteed to be disabled. // 3. The object name is guaranteed to be valid and pre-escaped. // 4. There is no need to flush the buffer (for unwrite purposes). - // 5. There is no possibility of an error occuring. + // 5. There is no possibility of an error occurring. if optimizeCommon { // Append any delimiters or optional whitespace. if enc.tokens.last.length() > 0 { @@ -996,7 +1035,7 @@ func makeStructArshaler(t reflect.Type) *arshaler { if fields.inlinedFallback == nil { // Skip unknown value since we have no place to store it. 
- if err := dec.skipValue(); err != nil { + if err := dec.SkipValue(); err != nil { return err } } else { diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go index 7476eda3..258a9824 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go @@ -5,6 +5,7 @@ package json import ( + "bytes" "errors" "reflect" ) @@ -89,35 +90,61 @@ func marshalInlinedFallbackAll(mo MarshalOptions, enc *Encoder, va addressableVa } return nil } else { - if v.Len() == 0 { + m := v // must be a map[string]V + n := m.Len() + if n == 0 { return nil } - m := v + mk := newAddressableValue(stringType) mv := newAddressableValue(m.Type().Elem()) - for iter := m.MapRange(); iter.Next(); { - b, err := appendString(enc.UnusedBuffer(), iter.Key().String(), !enc.options.AllowInvalidUTF8, nil) + marshalKey := func(mk addressableValue) error { + b, err := appendString(enc.UnusedBuffer(), mk.String(), !enc.options.AllowInvalidUTF8, nil) if err != nil { return err } if insertUnquotedName != nil { - isVerbatim := consumeSimpleString(b) == len(b) + isVerbatim := bytes.IndexByte(b, '\\') < 0 name := unescapeStringMayCopy(b, isVerbatim) if !insertUnquotedName(name) { return &SyntacticError{str: "duplicate name " + string(b) + " in object"} } } - if err := enc.WriteValue(b); err != nil { - return err + return enc.WriteValue(b) + } + marshalVal := f.fncs.marshal + if mo.Marshalers != nil { + marshalVal, _ = mo.Marshalers.lookup(marshalVal, mv.Type()) + } + if !mo.Deterministic || n <= 1 { + for iter := m.MapRange(); iter.Next(); { + mk.SetIterKey(iter) + if err := marshalKey(mk); err != nil { + return err + } + mv.Set(iter.Value()) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } } - - mv.Set(iter.Value()) - marshal := f.fncs.marshal - if mo.Marshalers != nil { - marshal, _ = mo.Marshalers.lookup(marshal, mv.Type()) + } else { + names := getStrings(n) + for i, iter := 0, m.Value.MapRange(); i < n && iter.Next(); i++ { + mk.SetIterKey(iter) + (*names)[i] = mk.String() } - if err := marshal(mo, enc, mv); err != nil { - return err + names.Sort() + for _, name := range *names { + mk.SetString(name) + if err := marshalKey(mk); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use mv.SetMapIndexOf. + mv.Set(m.MapIndex(mk.Value)) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } } + putStrings(names) } return nil } @@ -162,7 +189,7 @@ func unmarshalInlinedFallbackNext(uo UnmarshalOptions, dec *Decoder, va addressa } else { name := string(unquotedName) // TODO: Intern this? - m := v + m := v // must be a map[string]V if m.IsNil() { m.Set(reflect.MakeMap(m.Type())) } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go index ef4e1f5e..20899c86 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go @@ -21,8 +21,8 @@ var ( ) // MarshalerV1 is implemented by types that can marshal themselves. 
-// It is recommended that types implement MarshalerV2 unless -// the implementation is trying to avoid a hard dependency on this package. +// It is recommended that types implement MarshalerV2 unless the implementation +// is trying to avoid a hard dependency on the "jsontext" package. // // It is recommended that implementations return a buffer that is safe // for the caller to retain and potentially mutate. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go index 22e80222..fc8d5b00 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go @@ -5,6 +5,7 @@ package json import ( + "errors" "fmt" "reflect" "strings" @@ -85,25 +86,39 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { fncs.nonDefault = true fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { format := time.RFC3339Nano + isRFC3339 := true if mo.format != "" && mo.formatDepth == enc.tokens.depth() { var err error - format, err = checkTimeFormat(mo.format) + format, isRFC3339, err = checkTimeFormat(mo.format) if err != nil { return &SemanticError{action: "marshal", GoType: t, Err: err} } } tt := va.Interface().(time.Time) - if y := tt.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See https://go.dev/issue/4556#c15 for more discussion. - err := fmt.Errorf("year %d outside of range [0,9999]", y) - return &SemanticError{action: "marshal", GoType: t, Err: err} - } b := enc.UnusedBuffer() b = append(b, '"') b = tt.AppendFormat(b, format) b = append(b, '"') + if isRFC3339 { + // Not all Go timestamps can be represented as valid RFC 3339. + // Explicitly check for these edge cases. + // See https://go.dev/issue/4556 and https://go.dev/issue/54580. + var err error + switch b := b[len(`"`) : len(b)-len(`"`)]; { + case b[len("9999")] != '-': // year must be exactly 4 digits wide + err = errors.New("year outside of range [0,9999]") + case b[len(b)-1] != 'Z': + c := b[len(b)-len("Z07:00")] + if ('0' <= c && c <= '9') || parseDec2(b[len(b)-len("07:00"):]) >= 24 { + err = errors.New("timezone hour outside of range [0,23]") + } + } + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteValue(b) // RFC 3339 never needs JSON escaping + } // The format may contain special characters that need escaping. // Verify that the result is a valid JSON string (common case), // otherwise escape the string correctly (slower case). 
@@ -113,10 +128,11 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { return enc.WriteValue(b) } fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { - format := time.RFC3339Nano + format := time.RFC3339 + isRFC3339 := true if uo.format != "" && uo.formatDepth == dec.tokens.depth() { var err error - format, err = checkTimeFormat(uo.format) + format, isRFC3339, err = checkTimeFormat(uo.format) if err != nil { return &SemanticError{action: "unmarshal", GoType: t, Err: err} } @@ -136,6 +152,29 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { case '"': val = unescapeStringMayCopy(val, flags.isVerbatim()) tt2, err := time.Parse(format, string(val)) + if isRFC3339 && err == nil { + // TODO(https://go.dev/issue/54580): RFC 3339 specifies + // the exact grammar of a valid timestamp. However, + // the parsing functionality in "time" is too loose and + // incorrectly accepts invalid timestamps as valid. + // Remove these manual checks when "time" checks it for us. + newParseError := func(layout, value, layoutElem, valueElem, message string) error { + return &time.ParseError{Layout: layout, Value: value, LayoutElem: layoutElem, ValueElem: valueElem, Message: message} + } + switch { + case val[len("2006-01-02T")+1] == ':': // hour must be two digits + err = newParseError(format, string(val), "15", string(val[len("2006-01-02T"):][:1]), "") + case val[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + err = newParseError(format, string(val), ".", ",", "") + case val[len(val)-1] != 'Z': + switch { + case parseDec2(val[len(val)-len("07:00"):]) >= 24: // timezone hour must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone hour out of range") + case parseDec2(val[len(val)-len("00"):]) >= 60: // timezone minute must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone minute out of range") + } + } + } if err != nil { return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } @@ -149,48 +188,54 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { return fncs } -func checkTimeFormat(format string) (string, error) { +func checkTimeFormat(format string) (string, bool, error) { // We assume that an exported constant in the time package will // always start with an uppercase ASCII letter. 
if len(format) > 0 && 'A' <= format[0] && format[0] <= 'Z' { switch format { case "ANSIC": - return time.ANSIC, nil + return time.ANSIC, false, nil case "UnixDate": - return time.UnixDate, nil + return time.UnixDate, false, nil case "RubyDate": - return time.RubyDate, nil + return time.RubyDate, false, nil case "RFC822": - return time.RFC822, nil + return time.RFC822, false, nil case "RFC822Z": - return time.RFC822Z, nil + return time.RFC822Z, false, nil case "RFC850": - return time.RFC850, nil + return time.RFC850, false, nil case "RFC1123": - return time.RFC1123, nil + return time.RFC1123, false, nil case "RFC1123Z": - return time.RFC1123Z, nil + return time.RFC1123Z, false, nil case "RFC3339": - return time.RFC3339, nil + return time.RFC3339, true, nil case "RFC3339Nano": - return time.RFC3339Nano, nil + return time.RFC3339Nano, true, nil case "Kitchen": - return time.Kitchen, nil + return time.Kitchen, false, nil case "Stamp": - return time.Stamp, nil + return time.Stamp, false, nil case "StampMilli": - return time.StampMilli, nil + return time.StampMilli, false, nil case "StampMicro": - return time.StampMicro, nil + return time.StampMicro, false, nil case "StampNano": - return time.StampNano, nil + return time.StampNano, false, nil default: // Reject any format that is an exported Go identifier in case // new format constants are added to the time package. if strings.TrimFunc(format, isLetterOrDigit) == "" { - return "", fmt.Errorf("undefined format layout: %v", format) + return "", false, fmt.Errorf("undefined format layout: %v", format) } } } - return format, nil + return format, false, nil +} + +// parseDec2 parses b as an unsigned, base-10, 2-digit number. +// It panics if len(b) < 2. The result is undefined if digits are not base-10. +func parseDec2(b []byte) byte { + return 10*(b[0]-'0') + (b[1] - '0') } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go index 998ad68f..0d68b323 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go @@ -347,9 +347,9 @@ func (d *Decoder) PeekKind() Kind { return next } -// skipValue is semantically equivalent to calling ReadValue and discarding +// SkipValue is semantically equivalent to calling ReadValue and discarding // the result except that memory is not wasted trying to hold the entire result. -func (d *Decoder) skipValue() error { +func (d *Decoder) SkipValue() error { switch d.PeekKind() { case '{', '[': // For JSON objects and arrays, keep skipping all tokens @@ -374,7 +374,7 @@ func (d *Decoder) skipValue() error { } // ReadToken reads the next Token, advancing the read offset. -// The returned token is only valid until the next Peek or Read call. +// The returned token is only valid until the next Peek, Read, or Skip call. // It returns io.EOF if there are no more tokens. func (d *Decoder) ReadToken() (Token, error) { // Determine the next kind. @@ -585,7 +585,7 @@ func (f valueFlags) isCanonical() bool { return f&stringNonCanonical == 0 } // ReadValue returns the next raw JSON value, advancing the read offset. // The value is stripped of any leading or trailing whitespace. 
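// Hedged usage sketch for the newly exported Decoder.SkipValue (PeekKind,
// ReadToken and SkipValue are taken from the hunks above; the helper itself is
// illustrative and package-internal, it appears nowhere in the diff): scan an
// object for one member and cheaply discard every other member value.
func readMember(dec *Decoder, want string) (Token, error) {
	if _, err := dec.ReadToken(); err != nil { // consume '{'
		return Token{}, err
	}
	for dec.PeekKind() == '"' {
		name, err := dec.ReadToken()
		if err != nil {
			return Token{}, err
		}
		if name.String() == want {
			return dec.ReadToken() // first token of the wanted member's value
		}
		if err := dec.SkipValue(); err != nil { // skip without buffering the value
			return Token{}, err
		}
	}
	_, err := dec.ReadToken() // consume '}'
	return Token{}, err
}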
-// The returned value is only valid until the next Peek or Read call and +// The returned value is only valid until the next Peek, Read, or Skip call and // may not be mutated while the Decoder remains in use. // If the decoder is currently at the end token for an object or array, // then it reports a SyntacticError and the internal state remains unchanged. @@ -1013,7 +1013,7 @@ func (d *Decoder) InputOffset() int64 { // UnreadBuffer returns the data remaining in the unread buffer, // which may contain zero or more bytes. // The returned buffer must not be mutated while Decoder continues to be used. -// The buffer contents are valid until the next Peek or Read call. +// The buffer contents are valid until the next Peek, Read, or Skip call. func (d *Decoder) UnreadBuffer() []byte { return d.unreadBuffer() } @@ -1213,7 +1213,7 @@ func consumeStringResumable(flags *valueFlags, b []byte, resumeOffset int, valid return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} } // Only certain control characters can use the \uFFFF notation - // for canonical formating (per RFC 8785, section 3.2.2.2.). + // for canonical formatting (per RFC 8785, section 3.2.2.2.). switch v1 { // \uFFFF notation not permitted for these characters. case '\b', '\f', '\n', '\r', '\t': diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go index ba4af4b7..e4eefa3d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go @@ -8,8 +8,7 @@ // primitive data types such as booleans, strings, and numbers, // in addition to structured data types such as objects and arrays. // -// -// Terminology +// # Terminology // // This package uses the terms "encode" and "decode" for syntactic functionality // that is concerned with processing JSON based on its grammar, and @@ -32,8 +31,7 @@ // // See RFC 8259 for more information. // -// -// Specifications +// # Specifications // // Relevant specifications include RFC 4627, RFC 7159, RFC 7493, RFC 8259, // and RFC 8785. Each RFC is generally a stricter subset of another RFC. @@ -60,8 +58,7 @@ // In particular, it makes specific choices about behavior that RFC 8259 // leaves as undefined in order to ensure greater interoperability. // -// -// JSON Representation of Go structs +// # JSON Representation of Go structs // // A Go struct is naturally represented as a JSON object, // where each Go struct field corresponds with a JSON object member. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go index 5f98a840..5b81ca15 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go @@ -347,6 +347,30 @@ func (e *Encoder) unwriteEmptyObjectMember(prevName *string) bool { return true } +// unwriteOnlyObjectMemberName unwrites the only object member name +// and returns the unquoted name. +func (e *Encoder) unwriteOnlyObjectMemberName() string { + if last := e.tokens.last; !last.isObject() || last.length() != 1 { + panic("BUG: must be called on an object after writing first name") + } + + // Unwrite the name and whitespace. 
+ b := trimSuffixString(e.buf) + isVerbatim := bytes.IndexByte(e.buf[len(b):], '\\') < 0 + name := string(unescapeStringMayCopy(e.buf[len(b):], isVerbatim)) + e.buf = trimSuffixWhitespace(b) + + // Undo state changes. + e.tokens.last.decrement() + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + } + return name +} + func trimSuffixWhitespace(b []byte) []byte { // NOTE: The arguments and logic are kept simple to keep this inlineable. n := len(b) - 1 diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go index f7228221..60e93270 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go @@ -8,6 +8,7 @@ import ( "bytes" "io" "math/bits" + "sort" "sync" ) @@ -148,3 +149,34 @@ func putStreamingDecoder(d *Decoder) { streamingDecoderPool.Put(d) } } + +var stringsPools = &sync.Pool{New: func() any { return new(stringSlice) }} + +type stringSlice []string + +// getStrings returns a non-nil pointer to a slice with length n. +func getStrings(n int) *stringSlice { + s := stringsPools.Get().(*stringSlice) + if cap(*s) < n { + *s = make([]string, n) + } + *s = (*s)[:n] + return s +} + +func putStrings(s *stringSlice) { + if cap(*s) > 1<<10 { + *s = nil // avoid pinning arbitrarily large amounts of memory + } + stringsPools.Put(s) +} + +// Sort sorts the string slice according to RFC 8785, section 3.2.3. +func (ss *stringSlice) Sort() { + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Sort(ss) +} + +func (ss *stringSlice) Len() int { return len(*ss) } +func (ss *stringSlice) Less(i, j int) bool { return lessUTF16((*ss)[i], (*ss)[j]) } +func (ss *stringSlice) Swap(i, j int) { (*ss)[i], (*ss)[j] = (*ss)[j], (*ss)[i] } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go index d9c33f2b..ee14c753 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go @@ -721,7 +721,7 @@ func (s *uintSet) has(i uint) bool { return s.lo.has(i) } else { i -= 64 - iHi, iLo := int(i/64), uint(i%64) + iHi, iLo := int(i/64), i%64 return iHi < len(s.hi) && s.hi[iHi].has(iLo) } } @@ -735,7 +735,7 @@ func (s *uintSet) insert(i uint) bool { return !has } else { i -= 64 - iHi, iLo := int(i/64), uint(i%64) + iHi, iLo := int(i/64), i%64 if iHi >= len(s.hi) { s.hi = append(s.hi, make([]uintSet64, iHi+1-len(s.hi))...) s.hi = s.hi[:cap(s.hi)] diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go index 08509c29..9acba7da 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go @@ -112,7 +112,7 @@ func Bool(b bool) Token { return False } -// String construct a Token representing a JSON string. +// String constructs a Token representing a JSON string. 
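// Hedged usage sketch of the pooling helpers added in pools.go above (this
// function is illustrative and package-internal; it assumes "reflect" is
// imported and m is a map with string keys): callers borrow a right-sized
// []string, fill and sort it, and hand it back; putStrings drops very large
// backing arrays so the pool never pins big allocations.
func sortedMapKeys(m reflect.Value) []string {
	n := m.Len()
	names := getStrings(n)
	defer putStrings(names)
	for i, iter := 0, m.MapRange(); i < n && iter.Next(); i++ {
		(*names)[i] = iter.Key().String()
	}
	names.Sort() // RFC 8785 (UTF-16) member ordering
	return append([]string(nil), *names...) // copy out before recycling the slice
}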
// The provided string should contain valid UTF-8, otherwise invalid characters // may be mangled as the Unicode replacement character. func String(s string) Token { @@ -225,7 +225,7 @@ func (t Token) appendString(dst []byte, validateUTF8, preserveRaw bool, escapeRu } // String returns the unescaped string value for a JSON string. -// For other JSON kinds, this returns the raw JSON represention. +// For other JSON kinds, this returns the raw JSON representation. func (t Token) String() string { // This is inlinable to take advantage of "function outlining". // This avoids an allocation for the string(b) conversion @@ -373,10 +373,10 @@ func (t Token) Int() int64 { case 'i': return int64(t.num) case 'u': - if uint64(t.num) > maxInt64 { + if t.num > maxInt64 { return maxInt64 } - return int64(uint64(t.num)) + return int64(t.num) } } @@ -425,7 +425,7 @@ func (t Token) Uint() uint64 { // Handle exact integer value. switch t.str[0] { case 'u': - return uint64(t.num) + return t.num case 'i': if int64(t.num) < minUint64 { return minUint64 diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go index fe88e4fb..e0bd1b31 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go @@ -263,7 +263,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) { afterValue := d.InputOffset() if isSorted && len(*members) > 0 { - isSorted = lessUTF16(prevName, name) + isSorted = lessUTF16(prevName, []byte(name)) } *members = append(*members, memberName{name, beforeName, afterValue}) prevName = name @@ -317,7 +317,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) { // to the UTF-16 codepoints of the UTF-8 encoded input strings. // This implements the ordering specified in RFC 8785, section 3.2.3. // The inputs must be valid UTF-8, otherwise this may panic. -func lessUTF16(x, y []byte) bool { +func lessUTF16[Bytes []byte | string](x, y Bytes) bool { // NOTE: This is an optimized, allocation-free implementation // of lessUTF16Simple in fuzz_test.go. FuzzLessUTF16 verifies that the // two implementations agree on the result of comparing any two strings. @@ -326,8 +326,13 @@ func lessUTF16(x, y []byte) bool { return ('\u0000' <= r && r <= '\uD7FF') || ('\uE000' <= r && r <= '\uFFFF') } + var invalidUTF8 bool + x0, y0 := x, y for { if len(x) == 0 || len(y) == 0 { + if len(x) == len(y) && invalidUTF8 { + return string(x0) < string(y0) + } return len(x) < len(y) } @@ -341,35 +346,36 @@ func lessUTF16(x, y []byte) bool { } // Decode next pair of runes as UTF-8. - rx, nx := utf8.DecodeRune(x) - ry, ny := utf8.DecodeRune(y) - switch { - - // Both runes encode as either a single or surrogate pair - // of UTF-16 codepoints. - case isUTF16Self(rx) == isUTF16Self(ry): - if rx != ry { - return rx < ry - } + // TODO(https://go.dev/issue/56948): Use a generic implementation + // of utf8.DecodeRune, or rely on a compiler optimization to statically + // hide the cost of a type switch (https://go.dev/issue/57072). 
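// Hedged illustration of the clamping in Token.Int above: a token holding a
// uint64 larger than math.MaxInt64 reports math.MaxInt64 from Int() instead of
// wrapping around. Uint is assumed here to be the package's uint64 Token
// constructor; "fmt" and "math" imports are assumed as well.
func clampExample() {
	tok := Uint(math.MaxUint64)
	fmt.Println(tok.Int())  // 9223372036854775807 (math.MaxInt64)
	fmt.Println(tok.Uint()) // 18446744073709551615
}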
+ var rx, ry rune + var nx, ny int + switch any(x).(type) { + case string: + rx, nx = utf8.DecodeRuneInString(string(x)) + ry, ny = utf8.DecodeRuneInString(string(y)) + case []byte: + rx, nx = utf8.DecodeRune([]byte(x)) + ry, ny = utf8.DecodeRune([]byte(y)) + } + selfx := isUTF16Self(rx) + selfy := isUTF16Self(ry) + switch { // The x rune is a single UTF-16 codepoint, while // the y rune is a surrogate pair of UTF-16 codepoints. - case isUTF16Self(rx): - ry, _ := utf16.EncodeRune(ry) - if rx != ry { - return rx < ry - } - panic("BUG: invalid UTF-8") // implies rx is an unpaired surrogate half - + case selfx && !selfy: + ry, _ = utf16.EncodeRune(ry) // The y rune is a single UTF-16 codepoint, while // the x rune is a surrogate pair of UTF-16 codepoints. - case isUTF16Self(ry): - rx, _ := utf16.EncodeRune(rx) - if rx != ry { - return rx < ry - } - panic("BUG: invalid UTF-8") // implies ry is an unpaired surrogate half + case selfy && !selfx: + rx, _ = utf16.EncodeRune(rx) + } + if rx != ry { + return rx < ry } + invalidUTF8 = invalidUTF8 || (rx == utf8.RuneError && nx == 1) || (ry == utf8.RuneError && ny == 1) x, y = x[nx:], y[ny:] } } diff --git a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go b/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go deleted file mode 100644 index e993fe23..00000000 --- a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapiconv - -import ( - "strings" - - klog "k8s.io/klog/v2" - builderutil "k8s.io/kube-openapi/pkg/builder3/util" - "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -var OpenAPIV2DefPrefix = "#/definitions/" -var OpenAPIV3DefPrefix = "#/components/schemas/" - -// ConvertV2ToV3 converts an OpenAPI V2 object into V3. -// Certain references may be shared between the V2 and V3 objects in the conversion. 
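// Hedged reference version of the ordering implemented by the generic
// lessUTF16 above: compare strings by their UTF-16 code units, as required by
// RFC 8785, section 3.2.3. The optimized code avoids the allocations made here
// and additionally breaks ties for invalid UTF-8; this sketch assumes valid
// UTF-8 input.
package sketch

import "unicode/utf16"

func lessUTF16Simple(x, y string) bool {
	ux := utf16.Encode([]rune(x))
	uy := utf16.Encode([]rune(y))
	for i := 0; i < len(ux) && i < len(uy); i++ {
		if ux[i] != uy[i] {
			return ux[i] < uy[i]
		}
	}
	return len(ux) < len(uy)
}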
-func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI { - v3Spec := &spec3.OpenAPI{ - Version: "3.0.0", - Info: v2Spec.Info, - ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs), - Paths: ConvertPaths(v2Spec.Paths), - Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces), - } - - return v3Spec -} - -func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation { - if v2ED == nil { - return nil - } - return &spec3.ExternalDocumentation{ - ExternalDocumentationProps: spec3.ExternalDocumentationProps{ - Description: v2ED.Description, - URL: v2ED.URL, - }, - } -} - -func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components { - components := &spec3.Components{} - - if v2Definitions != nil { - components.Schemas = make(map[string]*spec.Schema) - } - for s, schema := range v2Definitions { - components.Schemas[s] = ConvertSchema(&schema) - } - if v2SecurityDefinitions != nil { - components.SecuritySchemes = make(spec3.SecuritySchemes) - } - for s, securityScheme := range v2SecurityDefinitions { - components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme) - } - if v2Responses != nil { - components.Responses = make(map[string]*spec3.Response) - } - for r, response := range v2Responses { - components.Responses[r] = ConvertResponse(&response, produces) - } - - return components -} - -func ConvertSchema(v2Schema *spec.Schema) *spec.Schema { - if v2Schema == nil { - return nil - } - v3Schema := spec.Schema{ - VendorExtensible: v2Schema.VendorExtensible, - SchemaProps: v2Schema.SchemaProps, - SwaggerSchemaProps: v2Schema.SwaggerSchemaProps, - ExtraProps: v2Schema.ExtraProps, - } - - if refString := v2Schema.Ref.String(); refString != "" { - if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 { - v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):]) - } else { - klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString) - } - } - - if v2Schema.Properties != nil { - v3Schema.Properties = make(map[string]spec.Schema) - for key, property := range v2Schema.Properties { - v3Schema.Properties[key] = *ConvertSchema(&property) - } - } - if v2Schema.Items != nil { - v3Schema.Items = &spec.SchemaOrArray{ - Schema: ConvertSchema(v2Schema.Items.Schema), - Schemas: ConvertSchemaList(v2Schema.Items.Schemas), - } - } - - if v2Schema.AdditionalProperties != nil { - v3Schema.AdditionalProperties = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema), - Allows: v2Schema.AdditionalProperties.Allows, - } - } - if v2Schema.AdditionalItems != nil { - v3Schema.AdditionalItems = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalItems.Schema), - Allows: v2Schema.AdditionalItems.Allows, - } - } - - return builderutil.WrapRefs(&v3Schema) -} - -func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema { - if v2SchemaList == nil { - return nil - } - v3SchemaList := []spec.Schema{} - for _, s := range v2SchemaList { - v3SchemaList = append(v3SchemaList, *ConvertSchema(&s)) - } - return v3SchemaList -} - -func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme { - if v2securityScheme == nil { - return nil - } - securityScheme := &spec3.SecurityScheme{ - VendorExtensible: v2securityScheme.VendorExtensible, - SecuritySchemeProps: 
spec3.SecuritySchemeProps{ - Description: v2securityScheme.Description, - Type: v2securityScheme.Type, - Name: v2securityScheme.Name, - In: v2securityScheme.In, - }, - } - - if v2securityScheme.Flow != "" { - securityScheme.Flows = make(map[string]*spec3.OAuthFlow) - securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{ - OAuthFlowProps: spec3.OAuthFlowProps{ - AuthorizationUrl: v2securityScheme.AuthorizationURL, - TokenUrl: v2securityScheme.TokenURL, - Scopes: v2securityScheme.Scopes, - }, - } - } - return securityScheme -} - -func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths { - if v2Paths == nil { - return nil - } - paths := &spec3.Paths{ - VendorExtensible: v2Paths.VendorExtensible, - } - - if v2Paths.Paths != nil { - paths.Paths = make(map[string]*spec3.Path) - } - for k, v := range v2Paths.Paths { - paths.Paths[k] = ConvertPathItem(v) - } - return paths -} - -func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path { - path := &spec3.Path{ - Refable: v2pathItem.Refable, - PathProps: spec3.PathProps{ - Get: ConvertOperation(v2pathItem.Get), - Put: ConvertOperation(v2pathItem.Put), - Post: ConvertOperation(v2pathItem.Post), - Delete: ConvertOperation(v2pathItem.Delete), - Options: ConvertOperation(v2pathItem.Options), - Head: ConvertOperation(v2pathItem.Head), - Patch: ConvertOperation(v2pathItem.Patch), - }, - VendorExtensible: v2pathItem.VendorExtensible, - } - for _, param := range v2pathItem.Parameters { - path.Parameters = append(path.Parameters, ConvertParameter(param)) - } - return path -} - -func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation { - if v2Operation == nil { - return nil - } - operation := &spec3.Operation{ - VendorExtensible: v2Operation.VendorExtensible, - OperationProps: spec3.OperationProps{ - Description: v2Operation.Description, - ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs), - Tags: v2Operation.Tags, - Summary: v2Operation.Summary, - Deprecated: v2Operation.Deprecated, - OperationId: v2Operation.ID, - }, - } - - for _, param := range v2Operation.Parameters { - if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil { - operation.OperationProps.RequestBody = &spec3.RequestBody{ - RequestBodyProps: spec3.RequestBodyProps{}, - } - if v2Operation.Consumes != nil { - operation.RequestBody.Content = make(map[string]*spec3.MediaType) - } - for _, consumer := range v2Operation.Consumes { - operation.RequestBody.Content[consumer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(param.ParamProps.Schema), - }, - } - } - } else { - operation.Parameters = append(operation.Parameters, ConvertParameter(param)) - } - } - - operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{ - Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces), - }, - VendorExtensible: v2Operation.Responses.VendorExtensible, - } - - if v2Operation.Responses.StatusCodeResponses != nil { - operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response) - } - for k, v := range v2Operation.Responses.StatusCodeResponses { - operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces) - } - return operation -} - -func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response { - if v2Response == nil { - return nil - } - response := &spec3.Response{ - Refable: ConvertRefableResponse(v2Response.Refable), - VendorExtensible: v2Response.VendorExtensible, - ResponseProps: spec3.ResponseProps{ - 
Description: v2Response.Description, - }, - } - - if v2Response.Schema != nil { - if produces != nil { - response.Content = make(map[string]*spec3.MediaType) - } - for _, producer := range produces { - response.ResponseProps.Content[producer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(v2Response.Schema), - }, - } - } - } - return response -} - -func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter { - param := &spec3.Parameter{ - Refable: ConvertRefableParameter(v2Param.Refable), - VendorExtensible: v2Param.VendorExtensible, - ParameterProps: spec3.ParameterProps{ - Name: v2Param.Name, - Description: v2Param.Description, - In: v2Param.In, - Required: v2Param.Required, - Schema: ConvertSchema(v2Param.Schema), - AllowEmptyValue: v2Param.AllowEmptyValue, - }, - } - // Convert SimpleSchema into Schema - if param.Schema == nil { - param.Schema = &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{v2Param.Type}, - Format: v2Param.Format, - UniqueItems: v2Param.UniqueItems, - }, - } - } - - return param -} - -func ConvertRefableParameter(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))} - } - return refable -} - -func ConvertRefableResponse(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))} - } - return refable -} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go new file mode 100644 index 00000000..61141a50 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go @@ -0,0 +1,260 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema. +// - models: a map from definition name to OpenAPI V3 structural schema for each definition. +// Key in map is used to resolve references in the schema. +// - preserveUnknownFields: flag indicating whether unknown fields in all schemas should be preserved. +// - returns: nil and an error if there is a parse error, or if schema does not satisfy a +// required structural schema invariant for conversion. If no error, returns +// a new smd schema. +// +// Schema should be validated as structural before using with this function, or +// there may be information lost. 
+func ToSchemaFromOpenAPI(models map[string]*spec.Schema, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + + for name, spec := range models { + // Skip/Ignore top-level references + if len(spec.Ref.String()) > 0 { + continue + } + + var a schema.Atom + + // Hard-coded schemas for now as proto_models implementation functions. + // https://github.com/kubernetes/kube-openapi/issues/364 + if name == quantityResource { + a = schema.Atom{ + Scalar: untypedDef.Atom.Scalar, + } + } else if name == rawExtensionResource { + a = untypedDef.Atom + } else { + c2 := c.push(name, &a) + c2.visitSpec(spec) + c.pop(c2) + } + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) visitSpec(m *spec.Schema) { + // Check if this schema opts its descendants into preserve-unknown-fields + if p, ok := m.Extensions["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + c.preserveUnknownFields = true + } + a := c.top() + *a = c.parseSchema(m) +} + +func (c *convert) parseSchema(m *spec.Schema) schema.Atom { + // k8s-generated OpenAPI specs have historically used only one value for + // type and starting with OpenAPIV3 it is only allowed to be + // a single string. + typ := "" + if len(m.Type) > 0 { + typ = m.Type[0] + } + + // Structural Schemas produced by kubernetes follow very specific rules which + // we can use to infer the SMD type: + switch typ { + case "": + // According to Swagger docs: + // https://swagger.io/docs/specification/data-models/data-types/#any + // + // If no type is specified, it is equivalent to accepting any type. 
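// Hedged usage sketch for the new ToSchemaFromOpenAPI entry point above; the
// definition name and field are invented. Map keys are what $ref targets
// resolve against, and preserveUnknownFields=false keeps unknown fields only
// where a schema opts in via x-kubernetes-preserve-unknown-fields.
package sketch

import (
	"k8s.io/kube-openapi/pkg/schemaconv"
	"k8s.io/kube-openapi/pkg/validation/spec"
	smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
)

func widgetSchema() (*smdschema.Schema, error) {
	models := map[string]*spec.Schema{
		"io.example.v1.Widget": {SchemaProps: spec.SchemaProps{
			Type: spec.StringOrArray{"object"},
			Properties: map[string]spec.Schema{
				"name": {SchemaProps: spec.SchemaProps{Type: spec.StringOrArray{"string"}}},
			},
		}},
	}
	return schemaconv.ToSchemaFromOpenAPI(models, false)
}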
+ return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: c.parseList(m), + Map: c.parseObject(m), + } + + case "object": + return schema.Atom{ + Map: c.parseObject(m), + } + case "array": + return schema.Atom{ + List: c.parseList(m), + } + case "integer", "boolean", "number", "string": + return convertPrimitive(typ, m.Format) + default: + c.reportError("unrecognized type: '%v'", typ) + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + } + } +} + +func (c *convert) makeOpenAPIRef(specSchema *spec.Schema) schema.TypeRef { + refString := specSchema.Ref.String() + + // Special-case handling for $ref stored inside a single-element allOf + if len(refString) == 0 && len(specSchema.AllOf) == 1 && len(specSchema.AllOf[0].Ref.String()) > 0 { + refString = specSchema.AllOf[0].Ref.String() + } + + if _, n := path.Split(refString); len(n) > 0 { + //!TODO: Refactor the field ElementRelationship override + // we can generate the types with overrides ahead of time rather than + // requiring the hacky runtime support + // (could just create a normalized key struct containing all customizations + // to deduplicate) + mapRelationship, err := getMapElementRelationship(specSchema.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + if len(mapRelationship) > 0 { + return schema.TypeRef{ + NamedType: &n, + ElementRelationship: &mapRelationship, + } + } + + return schema.TypeRef{ + NamedType: &n, + } + + } + var inlined schema.Atom + + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &inlined) + c2.preserveUnknownFields = c.preserveUnknownFields + c2.visitSpec(specSchema) + c.pop(c2) + + return schema.TypeRef{ + Inlined: inlined, + } +} + +func (c *convert) parseObject(s *spec.Schema) *schema.Map { + var fields []schema.StructField + for name, member := range s.Properties { + fields = append(fields, schema.StructField{ + Name: name, + Type: c.makeOpenAPIRef(&member), + Default: member.Default, + }) + } + + // AdditionalProperties informs the schema of any "unknown" keys + // Unknown keys are enforced by the ElementType field. + elementType := func() schema.TypeRef { + if s.AdditionalProperties == nil { + // According to openAPI spec, an object without properties and without + // additionalProperties is assumed to be a free-form object. + if c.preserveUnknownFields || len(s.Properties) == 0 { + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + // If properties are specified, do not implicitly allow unknown + // fields + return schema.TypeRef{} + } else if s.AdditionalProperties.Schema != nil { + // Unknown fields use the referred schema + return c.makeOpenAPIRef(s.AdditionalProperties.Schema) + + } else if s.AdditionalProperties.Allows { + // A boolean instead of a schema was provided. Deduce the + // type from the value provided at runtime. + return schema.TypeRef{ + NamedType: &deducedName, + } + } else { + // Additional Properties are explicitly disallowed by the user. + // Ensure element type is empty. 
+ return schema.TypeRef{} + } + }() + + relationship, err := getMapElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + return &schema.Map{ + Fields: fields, + ElementRelationship: relationship, + ElementType: elementType, + } +} + +func (c *convert) parseList(s *spec.Schema) *schema.List { + relationship, mapKeys, err := getListElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + elementType := func() schema.TypeRef { + if s.Items != nil { + if s.Items.Schema == nil || s.Items.Len() != 1 { + c.reportError("structural schema arrays must have exactly one member subtype") + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + subSchema := s.Items.Schema + if subSchema == nil { + subSchema = &s.Items.Schemas[0] + } + return c.makeOpenAPIRef(subSchema) + } else if len(s.Type) > 0 && len(s.Type[0]) > 0 { + c.reportError("`items` must be specified on arrays") + } + + // A list with no items specified is treated as "untyped". + return schema.TypeRef{ + NamedType: &untypedName, + } + + }() + + return &schema.List{ + ElementRelationship: relationship, + Keys: mapKeys, + ElementType: elementType, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go new file mode 100644 index 00000000..2c6fd76a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. 
+func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + for _, name := range models.ListModels() { + model := models.LookupModel(name) + + var a schema.Atom + c2 := c.push(name, &a) + model.Accept(c2) + c.pop(c2) + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + + mapRelationship, err := getMapElementRelationship(model.GetExtensions()) + + if err != nil { + c.reportError(err.Error()) + } + + // empty string means unset. + if len(mapRelationship) > 0 { + tr.ElementRelationship = &mapRelationship + } + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? + tr.NamedType = &untypedName + } + } + return tr +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. 
+ a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + a.Map.ElementRelationship, err = getMapElementRelationship(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } +} + +func (c *convert) VisitArray(a *proto.Array) { + relationship, mapKeys, err := getListElementRelationship(a.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + atom := c.top() + atom.List = &schema.List{ + ElementType: c.makeRef(a.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + Keys: mapKeys, + } +} + +func (c *convert) VisitMap(m *proto.Map) { + relationship, err := getMapElementRelationship(m.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + a := c.top() + a.Map = &schema.Map{ + ElementType: c.makeRef(m.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + } +} + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + if c.currentName == quantityResource { + a.Scalar = ptr(schema.Scalar("untyped")) + } else { + *a = convertPrimitive(p.Type, p.Format) + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = deducedDef.Atom +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index bec0e780..9887d185 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -17,43 +17,18 @@ limitations under the License. package schemaconv import ( - "errors" "fmt" - "path" "sort" - "strings" - "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/v4/schema" ) const ( - quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + rawExtensionResource = "io.k8s.apimachinery.pkg.runtime.RawExtension" ) -// ToSchema converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2). -func ToSchema(models proto.Models) (*schema.Schema, error) { - return ToSchemaWithPreserveUnknownFields(models, false) -} - -// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. -func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { - c := convert{ - input: models, - preserveUnknownFields: preserveUnknownFields, - output: &schema.Schema{}, - } - if err := c.convertAll(); err != nil { - return nil, err - } - c.addCommonTypes() - return c.output, nil -} - type convert struct { - input proto.Models preserveUnknownFields bool output *schema.Schema @@ -64,7 +39,6 @@ type convert struct { func (c *convert) push(name string, a *schema.Atom) *convert { return &convert{ - input: c.input, preserveUnknownFields: c.preserveUnknownFields, output: c.output, currentName: name, @@ -78,30 +52,17 @@ func (c *convert) pop(c2 *convert) { c.errorMessages = append(c.errorMessages, c2.errorMessages...) 
} -func (c *convert) convertAll() error { - for _, name := range c.input.ListModels() { - model := c.input.LookupModel(name) - c.insertTypeDef(name, model) - } - if len(c.errorMessages) > 0 { - return errors.New(strings.Join(c.errorMessages, "\n")) - } - return nil -} - func (c *convert) reportError(format string, args ...interface{}) { c.errorMessages = append(c.errorMessages, c.currentName+": "+fmt.Sprintf(format, args...), ) } -func (c *convert) insertTypeDef(name string, model proto.Schema) { +func (c *convert) insertTypeDef(name string, atom schema.Atom) { def := schema.TypeDef{ Name: name, + Atom: atom, } - c2 := c.push(name, &def.Atom) - model.Accept(c2) - c.pop(c2) if def.Atom == (schema.Atom{}) { // This could happen if there were a top-level reference. return @@ -156,46 +117,6 @@ var deducedDef schema.TypeDef = schema.TypeDef{ }, } -func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { - var tr schema.TypeRef - if r, ok := model.(*proto.Ref); ok { - if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { - return schema.TypeRef{ - NamedType: &untypedName, - } - } - // reference a named type - _, n := path.Split(r.Reference()) - tr.NamedType = &n - - ext := model.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { - switch val { - case "atomic": - relationship := schema.Atomic - tr.ElementRelationship = &relationship - case "granular": - relationship := schema.Separable - tr.ElementRelationship = &relationship - default: - c.reportError("unknown map type %v", val) - } - } - } else { - // compute the type inline - c2 := c.push("inlined in "+c.currentName, &tr.Inlined) - c2.preserveUnknownFields = preserveUnknownFields - model.Accept(c2) - c.pop(c2) - - if tr == (schema.TypeRef{}) { - // emit warning? - tr.NamedType = &untypedName - } - } - return tr -} - func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { schemaUnions := []schema.Union{} if iunions, ok := extensions["x-kubernetes-unions"]; ok { @@ -293,58 +214,9 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { } } - if union.Discriminator != nil && len(union.Fields) == 0 { - return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) - } return union, nil } -func (c *convert) VisitKind(k *proto.Kind) { - preserveUnknownFields := c.preserveUnknownFields - if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { - preserveUnknownFields = true - } - - a := c.top() - a.Map = &schema.Map{} - for _, name := range k.FieldOrder { - member := k.Fields[name] - tr := c.makeRef(member, preserveUnknownFields) - a.Map.Fields = append(a.Map.Fields, schema.StructField{ - Name: name, - Type: tr, - Default: member.GetDefault(), - }) - } - - unions, err := makeUnions(k.GetExtensions()) - if err != nil { - c.reportError(err.Error()) - return - } - // TODO: We should check that the fields and discriminator - // specified in the union are actual fields in the struct. 
- a.Map.Unions = unions - - if preserveUnknownFields { - a.Map.ElementType = schema.TypeRef{ - NamedType: &deducedName, - } - } - - ext := k.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { - switch val { - case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable - default: - c.reportError("unknown map type %v", val) - } - } -} - func toStringSlice(o interface{}) (out []string, ok bool) { switch t := o.(type) { case []interface{}: @@ -355,117 +227,108 @@ func toStringSlice(o interface{}) (out []string, ok bool) { } } return out, true + case []string: + return t, true } return nil, false } -func (c *convert) VisitArray(a *proto.Array) { - atom := c.top() - atom.List = &schema.List{ - ElementRelationship: schema.Atomic, - } - l := atom.List - l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) - - ext := a.GetExtensions() +func ptr(s schema.Scalar) *schema.Scalar { return &s } - if val, ok := ext["x-kubernetes-list-type"]; ok { - if val == "atomic" { - l.ElementRelationship = schema.Atomic - } else if val == "set" { - l.ElementRelationship = schema.Associative - } else if val == "map" { - l.ElementRelationship = schema.Associative - if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { - if keyNames, ok := toStringSlice(keys); ok { - l.Keys = keyNames - } else { - c.reportError("uninterpreted map keys: %#v", keys) - } - } else { - c.reportError("missing map keys") - } - } else { - c.reportError("unknown list type %v", val) - l.ElementRelationship = schema.Atomic - } - } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { - if val == "merge" || val == "merge,retainKeys" { - l.ElementRelationship = schema.Associative - if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { - if keyName, ok := key.(string); ok { - l.Keys = []string{keyName} - } else { - c.reportError("uninterpreted merge key: %#v", key) - } - } else { - // It's not an error for this to be absent, it - // means it's a set. - } - } else if val == "retainKeys" { - } else { - c.reportError("unknown patch strategy %v", val) - l.ElementRelationship = schema.Atomic +// Basic conversion functions to convert OpenAPI schema definitions to +// SMD Schema atoms +func convertPrimitive(typ string, format string) (a schema.Atom) { + switch typ { + case "integer": + a.Scalar = ptr(schema.Numeric) + case "number": + a.Scalar = ptr(schema.Numeric) + case "string": + switch format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. 
+ a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } + case "boolean": + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } -} -func (c *convert) VisitMap(m *proto.Map) { - a := c.top() - a.Map = &schema.Map{} - a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + return a +} - ext := m.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { +func getListElementRelationship(ext map[string]any) (schema.ElementRelationship, []string, error) { + if val, ok := ext["x-kubernetes-list-type"]; ok { switch val { case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable + return schema.Atomic, nil, nil + case "set": + return schema.Associative, nil, nil + case "map": + keys, ok := ext["x-kubernetes-list-map-keys"] + + if !ok { + return schema.Associative, nil, fmt.Errorf("missing map keys") + } + + keyNames, ok := toStringSlice(keys) + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted map keys: %#v", keys) + } + + return schema.Associative, keyNames, nil default: - c.reportError("unknown map type %v", val) + return schema.Atomic, nil, fmt.Errorf("unknown list type %v", val) } - } -} + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + switch val { + case "merge", "merge,retainKeys": + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + keyName, ok := key.(string) -func ptr(s schema.Scalar) *schema.Scalar { return &s } + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted merge key: %#v", key) + } -func (c *convert) VisitPrimitive(p *proto.Primitive) { - a := c.top() - if c.currentName == quantityResource { - a.Scalar = ptr(schema.Scalar("untyped")) - } else { - switch p.Type { - case proto.Integer: - a.Scalar = ptr(schema.Numeric) - case proto.Number: - a.Scalar = ptr(schema.Numeric) - case proto.String: - switch p.Format { - case "": - a.Scalar = ptr(schema.String) - case "byte": - // byte really means []byte and is encoded as a string. 
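// Hedged illustration of how the new getListElementRelationship helper above
// reads the Kubernetes list extensions (the extension maps are invented and
// the calls are package-internal):
func listRelationshipExamples() {
	// x-kubernetes-list-type=map with declared keys -> associative, keyed by "name".
	rel, keys, _ := getListElementRelationship(map[string]any{
		"x-kubernetes-list-type":     "map",
		"x-kubernetes-list-map-keys": []any{"name"},
	})
	_, _ = rel, keys // schema.Associative, []string{"name"}

	// Legacy patch metadata: strategy "merge" plus a merge key behaves the same way.
	rel, keys, _ = getListElementRelationship(map[string]any{
		"x-kubernetes-patch-strategy":  "merge",
		"x-kubernetes-patch-merge-key": "port",
	})
	_, _ = rel, keys // schema.Associative, []string{"port"}

	// No extensions at all -> atomic by default.
	rel, _, _ = getListElementRelationship(map[string]any{})
	_ = rel // schema.Atomic
}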
- a.Scalar = ptr(schema.String) - case "int-or-string": - a.Scalar = ptr(schema.Scalar("untyped")) - case "date-time": - a.Scalar = ptr(schema.Scalar("untyped")) - default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Associative, []string{keyName}, nil } - case proto.Boolean: - a.Scalar = ptr(schema.Boolean) + // It's not an error for x-kubernetes-patch-merge-key to be absent, + // it means it's a set + return schema.Associative, nil, nil + case "retainKeys": + return schema.Atomic, nil, nil default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Atomic, nil, fmt.Errorf("unknown patch strategy %v", val) } } -} -func (c *convert) VisitArbitrary(a *proto.Arbitrary) { - *c.top() = deducedDef.Atom + // Treat as atomic by default + return schema.Atomic, nil, nil } -func (c *convert) VisitReference(proto.Reference) { - // Do nothing, we handle references specially +// Returns map element relationship if specified, or empty string if unspecified +func getMapElementRelationship(ext map[string]any) (schema.ElementRelationship, error) { + val, ok := ext["x-kubernetes-map-type"] + if !ok { + // unset Map element relationship + return "", nil + } + + switch val { + case "atomic": + return schema.Atomic, nil + case "granular": + return schema.Separable, nil + default: + return "", fmt.Errorf("unknown map type %v", val) + } } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 51dac4bd..1f62c6e7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -29,6 +32,9 @@ type Encoding struct { // MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON func (e *Encoding) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.EncodingProps) if err != nil { return nil, err @@ -40,7 +46,20 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *Encoding) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + EncodingProps encodingPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.EncodingProps = encodingPropsOmitZero(e.EncodingProps) + return opts.MarshalNext(enc, x) +} + func (e *Encoding) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.EncodingProps); err != nil { return err } @@ -50,6 +69,20 @@ func (e *Encoding) UnmarshalJSON(data []byte) error { return nil } +func (e *Encoding) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + EncodingProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.EncodingProps = x.EncodingProps + return nil +} + type EncodingProps struct { // Content Type for encoding a specific property ContentType string `json:"contentType,omitempty"` @@ -58,7 +91,15 @@ type EncodingProps struct { // Describes how a specific property value will be serialized depending on its 
type Style string `json:"style,omitempty"` // When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. For other types of properties this property has no effect - Explode string `json:"explode,omitempty"` + Explode bool `json:"explode,omitempty"` // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } + +type encodingPropsOmitZero struct { + ContentType string `json:"contentType,omitempty"` + Headers map[string]*Header `json:"headers,omitempty"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 0f5ab983..8834a92e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -19,8 +19,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + + "k8s.io/kube-openapi/pkg/validation/spec" ) // Example https://swagger.io/specification/#example-object @@ -33,6 +36,9 @@ type Example struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (e *Example) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.Refable) if err != nil { return nil, err @@ -47,8 +53,22 @@ func (e *Example) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b1, b2, b3), nil } +func (e *Example) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ExampleProps `json:",inline"` + spec.Extensions + } + x.Ref = e.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExampleProps = e.ExampleProps + return opts.MarshalNext(enc, x) +} func (e *Example) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.Refable); err != nil { return err } @@ -61,6 +81,23 @@ func (e *Example) UnmarshalJSON(data []byte) error { return nil } +func (e *Example) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExampleProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&e.Ref.Ref, x.Extensions); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExampleProps = x.ExampleProps + + return nil +} + type ExampleProps struct { // Summary holds a short description of the example Summary string `json:"summary,omitempty"` diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index 117113e7..f0515496 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -18,8 +18,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 
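// Hedged sketch of the pattern behind encodingPropsOmitZero above (type and
// field names here are invented): each spec3 Props struct gains a mirror whose
// boolean fields carry the experimental `omitzero` tag, because under the
// jsonv2 semantics `omitempty` no longer drops a false bool; the marshaler
// converts the Props value to the mirror, which Go allows since struct tags
// are ignored for convertibility.
type widgetProps struct {
	Name    string `json:"name,omitempty"`
	Explode bool   `json:"explode,omitempty"`
}

type widgetPropsOmitZero struct {
	Name    string `json:"name,omitempty"`
	Explode bool   `json:"explode,omitzero"` // dropped when false
}

func mirror(p widgetProps) widgetPropsOmitZero {
	return widgetPropsOmitZero(p)
}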
"k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) type ExternalDocumentation struct { @@ -36,6 +39,9 @@ type ExternalDocumentationProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.ExternalDocumentationProps) if err != nil { return nil, err @@ -47,7 +53,20 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *ExternalDocumentation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ExternalDocumentationProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExternalDocumentationProps = e.ExternalDocumentationProps + return opts.MarshalNext(enc, x) +} + func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil { return err } @@ -56,3 +75,16 @@ func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { } return nil } + +func (e *ExternalDocumentation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExternalDocumentationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExternalDocumentationProps = x.ExternalDocumentationProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go new file mode 100644 index 00000000..08b6246c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -0,0 +1,281 @@ +package spec3 + +import ( + "math/rand" + "strings" + + fuzz "github.com/google/gofuzz" + + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// refChance is the chance that a particular component will use a $ref +// instead of fuzzed. Expressed as a fraction 1/n, currently there is +// a 1/3 chance that a ref will be used. +const refChance = 3 + +const alphaNumChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randAlphanumString() string { + arr := make([]string, rand.Intn(10)+5) + for i := 0; i < len(arr); i++ { + arr[i] = string(alphaNumChars[rand.Intn(len(alphaNumChars))]) + } + return strings.Join(arr, "") +} + +var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ + func(s *string, c fuzz.Continue) { + // All OpenAPI V3 map keys must follow the corresponding + // regex. Note that this restricts the range for all other + // string values as well. 
+ str := randAlphanumString() + *s = str + }, + func(o *OpenAPI, c fuzz.Continue) { + c.FuzzNoCustom(o) + o.Version = "3.0.0" + for i, val := range o.SecurityRequirement { + if val == nil { + o.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + + }, + func(r *interface{}, c fuzz.Continue) { + switch c.Intn(3) { + case 0: + *r = nil + case 1: + n := c.RandString() + "x" + *r = n + case 2: + n := c.Float64() + *r = n + } + }, + func(v **spec.Info, c fuzz.Continue) { + // Info is never nil + *v = &spec.Info{} + c.FuzzNoCustom(*v) + (*v).Title = c.RandString() + "x" + }, + func(v *Paths, c fuzz.Continue) { + c.Fuzz(&v.VendorExtensible) + num := c.Intn(5) + if num > 0 { + v.Paths = make(map[string]*Path) + } + for i := 0; i < num; i++ { + val := Path{} + c.Fuzz(&val) + v.Paths["/"+c.RandString()] = &val + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + switch c.Intn(4) { + case 0: + v.Type = "apiKey" + v.Name = c.RandString() + "x" + switch c.Intn(3) { + case 0: + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + case 1: + v.Type = "http" + case 2: + v.Type = "oauth2" + v.Flows = make(map[string]*OAuthFlow) + flow := OAuthFlow{} + flow.AuthorizationUrl = c.RandString() + "x" + v.Flows["implicit"] = &flow + flow.Scopes = make(map[string]string) + flow.Scopes["foo"] = "bar" + case 3: + v.Type = "openIdConnect" + v.OpenIdConnectUrl = "https://" + c.RandString() + } + v.Scheme = "basic" + }, + func(v *spec.Ref, c fuzz.Continue) { + switch c.Intn(7) { + case 0: + *v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString()) + case 1: + *v = spec.MustCreateRef("#/components/responses/" + randAlphanumString()) + case 2: + *v = spec.MustCreateRef("#/components/headers/" + randAlphanumString()) + case 3: + *v = spec.MustCreateRef("#/components/securitySchemes/" + randAlphanumString()) + case 5: + *v = spec.MustCreateRef("#/components/parameters/" + randAlphanumString()) + case 6: + *v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString()) + } + }, + func(v *Parameter, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ParameterProps) + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + // Header param + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + }, + func(v *RequestBody, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.RequestBodyProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *Header, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.HeaderProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *ResponsesProps, c fuzz.Continue) { + c.Fuzz(&v.Default) + n := c.Intn(5) + for i := 0; i < n; i++ { + r2 := Response{} + c.Fuzz(&r2) + // HTTP Status code in 100-599 Range + code := c.Intn(500) + 100 + v.StatusCodeResponses = make(map[int]*Response) + v.StatusCodeResponses[code] = &r2 + } + }, + func(v *Response, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ResponseProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + // Do not fuzz null values into the array. 
+ for i, val := range v.SecurityRequirement { + if val == nil { + v.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, + func(v *spec.Extensions, c fuzz.Continue) { + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + if *v == nil { + *v = spec.Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *spec.ExternalDocumentation, c fuzz.Continue) { + c.Fuzz(&v.Description) + v.URL = "https://" + randAlphanumString() + }, + func(v *spec.SchemaURL, c fuzz.Continue) { + *v = spec.SchemaURL("https://" + randAlphanumString()) + }, + func(v *spec.SchemaOrBool, c fuzz.Continue) { + *v = spec.SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &spec.Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v *spec.SchemaOrArray, c fuzz.Continue) { + *v = spec.SchemaOrArray{} + if c.RandBool() { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schema = &schema + } else { + v.Schemas = []spec.Schema{} + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schemas = append(v.Schemas, schema) + } + + } + + }, + func(v *spec.SchemaOrStringArray, c fuzz.Continue) { + if c.RandBool() { + *v = spec.SchemaOrStringArray{} + if c.RandBool() { + c.Fuzz(&v.Property) + } else { + c.Fuzz(&v.Schema) + } + } + }, + func(v *spec.Schema, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Ref) + return + } + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = spec.StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.ExtraProps) + } + + }, +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index cead4b15..9ea30628 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -34,6 +36,9 @@ type Header struct { // MarshalJSON is a custom marshal function that knows how to encode Header as JSON func (h *Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.Refable) if err != nil { return nil, err @@ -49,7 +54,22 @@ func (h *Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (h *Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + HeaderProps headerPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = h.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = headerPropsOmitZero(h.HeaderProps) + return opts.MarshalNext(enc, x) +} + func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, h) + } if err := json.Unmarshal(data, &h.Refable); err != nil { return err } @@ -63,6 +83,22 @@ func (h *Header) UnmarshalJSON(data []byte) error { return 
nil } +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + HeaderProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&h.Ref.Ref, x.Extensions); err != nil { + return err + } + h.Extensions = internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + return nil +} + // HeaderProps a struct that describes a header object type HeaderProps struct { // Description holds a brief description of the parameter @@ -88,3 +124,19 @@ type HeaderProps struct { // Examples of the header Examples map[string]*Example `json:"examples,omitempty"` } + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type headerPropsOmitZero struct { + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index 828fd8dc..47eef1ed 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -32,6 +35,9 @@ type MediaType struct { // MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON func (m *MediaType) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(m) + } b1, err := json.Marshal(m.MediaTypeProps) if err != nil { return nil, err @@ -43,7 +49,20 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *MediaType) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + MediaTypeProps mediaTypePropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.MediaTypeProps = mediaTypePropsOmitZero(e.MediaTypeProps) + return opts.MarshalNext(enc, x) +} + func (m *MediaType) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, m) + } if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil { return err } @@ -53,10 +72,24 @@ func (m *MediaType) UnmarshalJSON(data []byte) error { return nil } +func (m *MediaType) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + MediaTypeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + m.Extensions = internal.SanitizeExtensions(x.Extensions) + m.MediaTypeProps = x.MediaTypeProps + + return nil +} + // MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject type MediaTypeProps struct { 
// Schema holds the schema defining the type used for the media type - Schema *spec.Schema `json:"schema,omitempty"` + Schema *spec.Schema `json:"schema,omitempty"` // Example of the media type Example interface{} `json:"example,omitempty"` // Examples of the media type. Each example object should match the media type and specific schema if present @@ -64,3 +97,10 @@ type MediaTypeProps struct { // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded Encoding map[string]*Encoding `json:"encoding,omitempty"` } + +type mediaTypePropsOmitZero struct { + Schema *spec.Schema `json:"schema,omitzero"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` + Encoding map[string]*Encoding `json:"encoding,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index de8aa460..f1e10254 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -19,8 +19,10 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject @@ -33,6 +35,9 @@ type Operation struct { // MarshalJSON is a custom marshal function that knows how to encode Operation as JSON func (o *Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -44,14 +49,40 @@ func (o *Operation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (o *Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + spec.Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, o) + } if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } return json.Unmarshal(data, &o.VendorExtensible) } +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + OperationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = x.OperationProps + return nil +} + // OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject type OperationProps struct { // Tags holds a list of tags for API documentation control @@ -73,7 +104,21 @@ type OperationProps struct { // Deprecated declares this operation to be deprecated Deprecated bool 
`json:"deprecated,omitempty"` // SecurityRequirement holds a declaration of which security mechanisms can be used for this operation - SecurityRequirement []*SecurityRequirement `json:"security,omitempty"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } + +type operationPropsOmitZero struct { + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + Description string `json:"description,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + OperationId string `json:"operationId,omitempty"` + Parameters []*Parameter `json:"parameters,omitempty"` + RequestBody *RequestBody `json:"requestBody,omitzero"` + Responses *Responses `json:"responses,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + Servers []*Server `json:"servers,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index 0d7180e5..ada7edb6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -34,6 +36,9 @@ type Parameter struct { // MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON func (p *Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -49,7 +54,23 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ParameterProps parameterPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParameterProps = parameterPropsOmitZero(p.ParameterProps) + return opts.MarshalNext(enc, x) +} + func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -63,6 +84,22 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return nil } +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ParameterProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParameterProps = x.ParameterProps + return nil +} + // ParameterProps a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject type ParameterProps struct { // Name holds the name of the parameter @@ -92,3 +129,19 @@ type ParameterProps struct { // Examples of the parameter's potential value. 
Each example SHOULD contain a value in the correct format as specified in the parameter encoding Examples map[string]*Example `json:"examples,omitempty"` } + +type parameterPropsOmitZero struct { + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index bc48c504..16fbbb4d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -18,10 +18,13 @@ package spec3 import ( "encoding/json" + "fmt" "strings" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject @@ -32,19 +35,48 @@ type Paths struct { // MarshalJSON is a custom marshal function that knows how to encode Paths as JSON func (p *Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.Paths) + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err } - b2, err := json.Marshal(p.VendorExtensible) + + pths := make(map[string]*Path) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) if err != nil { return nil, err } - return swag.ConcatJSON(b1, b2), nil + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (p *Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) } // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { return err @@ -74,6 +106,59 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *p = Paths{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k := tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = 
make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi := Path{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]*Path) + } + p.Paths[k] = &pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject // // Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible @@ -85,6 +170,9 @@ type Path struct { // MarshalJSON is a custom marshal function that knows how to encode Path as JSON func (p *Path) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -100,7 +188,22 @@ func (p *Path) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Path) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + PathProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathProps = p.PathProps + return opts.MarshalNext(enc, x) +} + func (p *Path) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -113,6 +216,24 @@ func (p *Path) UnmarshalJSON(data []byte) error { return nil } +func (p *Path) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + PathProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathProps = x.PathProps + + return nil +} + // PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject type PathProps struct { // Summary holds a summary for all operations in this path diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 0adc6282..6f8607e4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -19,8 +19,10 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject @@ -34,6 +36,9 @@ type RequestBody struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (r *RequestBody) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -49,7 +54,22 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { return 
swag.ConcatJSON(b1, b2, b3), nil } +func (r *RequestBody) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + RequestBodyProps requestBodyPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.RequestBodyProps = requestBodyPropsOmitZero(r.RequestBodyProps) + return opts.MarshalNext(enc, x) +} + func (r *RequestBody) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -71,3 +91,25 @@ type RequestBodyProps struct { // Required determines if the request body is required in the request Required bool `json:"required,omitempty"` } + +type requestBodyPropsOmitZero struct { + Description string `json:"description,omitempty"` + Content map[string]*MediaType `json:"content,omitempty"` + Required bool `json:"required,omitzero"` +} + +func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + RequestBodyProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.RequestBodyProps = x.RequestBodyProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index ccd73369..73e241fd 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -18,10 +18,13 @@ package spec3 import ( "encoding/json" + "fmt" "strconv" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Responses holds the list of possible responses as they are returned from executing this operation @@ -34,6 +37,9 @@ type Responses struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (r *Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -45,14 +51,35 @@ func (r *Responses) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitzero"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } - return nil } @@ -78,25 +105,91 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) 
{ // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]*Response + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } if v, ok := res["default"]; ok { - r.Default = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value delete(res, "default") } for k, v := range res { + // Take all integral keys if nk, err := strconv.Atoi(k); err == nil { if r.StatusCodeResponses == nil { r.StatusCodeResponses = map[int]*Response{} } - r.StatusCodeResponses[nk] = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = &value } } return nil } +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *r = Responses{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + r.ResponsesProps.Default = &resp + default: + if nk, err := strconv.Atoi(k); err == nil { + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]*Response{} + } + r.StatusCodeResponses[nk] = &resp + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject // // Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible @@ -108,6 +201,9 @@ type Response struct { // MarshalJSON is a custom marshal function that knows how to encode Response as JSON func (r *Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -123,7 +219,22 @@ func (r *Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + ResponseProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = r.ResponseProps + return opts.MarshalNext(enc, x) +} + func (r *Response) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -133,7 +244,22 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } + return nil +} +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec 
*jsonv2.Decoder) error { + var x struct { + spec.Extensions + ResponseProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps return nil } @@ -149,7 +275,6 @@ type ResponseProps struct { Links map[string]*Link `json:"links,omitempty"` } - // Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object type Link struct { spec.Refable @@ -159,6 +284,9 @@ type Link struct { // MarshalJSON is a custom marshal function that knows how to encode Link as JSON func (r *Link) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -174,7 +302,22 @@ func (r *Link) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r *Link) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + LinkProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.LinkProps = r.LinkProps + return opts.MarshalNext(enc, x) +} + func (r *Link) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -188,6 +331,22 @@ func (r *Link) UnmarshalJSON(data []byte) error { return nil } +func (l *Link) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + LinkProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&l.Ref.Ref, x.Extensions); err != nil { + return err + } + l.Extensions = internal.SanitizeExtensions(x.Extensions) + l.LinkProps = x.LinkProps + return nil +} + // LinkProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject type LinkProps struct { // OperationId is the name of an existing, resolvable OAS operation diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go deleted file mode 100644 index 0ce8924e..00000000 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -// -// Note that this struct is actually a thin wrapper around SecurityRequirementProps to make it referable and extensible -type SecurityRequirement struct { - SecurityRequirementProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode SecurityRequirement as JSON -func (s *SecurityRequirement) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecurityRequirementProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (s *SecurityRequirement) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecurityRequirementProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -type SecurityRequirementProps map[string][]string diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index 9b1352f4..dd1e98ed 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -19,8 +19,10 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject @@ -32,6 +34,9 @@ type SecurityScheme struct { // MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON func (s *SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -47,6 +52,18 @@ func (s *SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (s *SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + SecuritySchemeProps `json:",inline"` + spec.Extensions + } + x.Ref = s.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index a505fb22..654a42c0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -18,9 +18,11 @@ package spec3 import ( "encoding/json" - 
"k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) type Server struct { @@ -39,6 +41,9 @@ type ServerProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *Server) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerProps) if err != nil { return nil, err @@ -50,7 +55,21 @@ func (s *Server) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *Server) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerProps = s.ServerProps + return opts.MarshalNext(enc, x) +} + func (s *Server) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } + if err := json.Unmarshal(data, &s.ServerProps); err != nil { return err } @@ -60,6 +79,20 @@ func (s *Server) UnmarshalJSON(data []byte) error { return nil } +func (s *Server) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerProps = x.ServerProps + + return nil +} + type ServerVariable struct { ServerVariableProps spec.VendorExtensible @@ -76,6 +109,9 @@ type ServerVariableProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *ServerVariable) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerVariableProps) if err != nil { return nil, err @@ -87,7 +123,20 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *ServerVariable) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerVariableProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerVariableProps = s.ServerVariableProps + return opts.MarshalNext(enc, x) +} + func (s *ServerVariable) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil { return err } @@ -96,3 +145,17 @@ func (s *ServerVariable) UnmarshalJSON(data []byte) error { } return nil } + +func (s *ServerVariable) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerVariableProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerVariableProps = x.ServerVariableProps + + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index 3ff48a3c..5db819c7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -17,6 +17,10 @@ limitations under the License. 
package spec3 import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -32,6 +36,40 @@ type OpenAPI struct { Servers []*Server `json:"servers,omitempty"` // Components hold various schemas for the specification Components *Components `json:"components,omitempty"` + // SecurityRequirement holds a declaration of which security mechanisms can be used across the API + SecurityRequirement []map[string][]string `json:"security,omitempty"` // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } + +func (o *OpenAPI) UnmarshalJSON(data []byte) error { + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, &p) + } + return json.Unmarshal(data, &p) +} + +func (o *OpenAPI) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + return json.Marshal(&p) +} + +func (o *OpenAPI) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type OpenAPIOmitZero struct { + Version string `json:"openapi"` + Info *spec.Info `json:"info"` + Paths *Paths `json:"paths,omitzero"` + Servers []*Server `json:"servers,omitempty"` + Components *Components `json:"components,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + } + x := (*OpenAPIOmitZero)(o) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 763923df..5789e67a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" "gopkg.in/yaml.v2" ) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go index a3f476d5..d9f2896e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -21,7 +21,7 @@ import ( "reflect" "strings" - openapi_v3 "github.com/google/gnostic/openapiv3" + openapi_v3 "github.com/google/gnostic-models/openapiv3" "gopkg.in/yaml.v3" ) @@ -120,7 +120,7 @@ func (d *Definitions) ParseSchemaV3(s *openapi_v3.Schema, path *Path) (Schema, e switch s.GetType() { case object: for _, extension := range s.GetSpecificationExtension() { - if extension.Name == "x-kuberentes-group-version-kind" { + if extension.Name == "x-kubernetes-group-version-kind" { // Objects with x-kubernetes-group-version-kind are always top // level types. 
return d.parseV3Kind(s, path) @@ -285,7 +285,7 @@ func parseV3Interface(def *yaml.Node) (interface{}, error) { func (d *Definitions) parseV3BaseSchema(s *openapi_v3.Schema, path *Path) (*BaseSchema, error) { if s == nil { - return nil, fmt.Errorf("cannot initializae BaseSchema from nil") + return nil, fmt.Errorf("cannot initialize BaseSchema from nil") } def, err := parseV3Interface(s.GetDefault().ToRawInfo()) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go index 0be06989..6eee935b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -30,6 +30,7 @@ import ( // in GetCanonicalTypeName means Go type names with full package path. // // Examples of REST friendly OpenAPI name: +// // Input: k8s.io/api/core/v1.Pod // Output: io.k8s.api.core.v1.Pod // @@ -45,6 +46,7 @@ func ToCanonicalName(name string) string { // ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name. // // Examples of REST friendly OpenAPI name: +// // Input: k8s.io/api/core/v1.Pod // Output: io.k8s.api.core.v1.Pod // @@ -71,18 +73,21 @@ func ToRESTFriendlyName(name string) string { // OpenAPI canonical names are Go type names with full package path, for uniquely indentifying // a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/" // should be used. For custom resource definition (CRD), the canonical name is expected to be -// group/version.kind +// +// group/version.kind // // Examples of canonical name: -// Go type: k8s.io/kubernetes/pkg/apis/core.Pod -// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo +// +// Go type: k8s.io/kubernetes/pkg/apis/core.Pod +// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo // // Example for vendored Go type: -// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod -// Canonical name: k8s.io/api/core/v1.Pod // -// Original full path: vendor/k8s.io/api/core/v1.Pod -// Canonical name: k8s.io/api/core/v1.Pod +// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod +// Canonical name: k8s.io/api/core/v1.Pod +// +// Original full path: vendor/k8s.io/api/core/v1.Pod +// Canonical name: k8s.io/api/core/v1.Pod type OpenAPICanonicalTypeNamer interface { OpenAPICanonicalTypeName() string } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go deleted file mode 100644 index c66f998f..00000000 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go +++ /dev/null @@ -1,502 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec - -import ( - "github.com/go-openapi/jsonreference" - "github.com/google/go-cmp/cmp" - fuzz "github.com/google/gofuzz" -) - -var SwaggerFuzzFuncs []interface{} = []interface{}{ - func(v *Responses, c fuzz.Continue) { - c.FuzzNoCustom(v) - if v.Default != nil { - // Check if we hit maxDepth and left an incomplete value - if v.Default.Description == "" { - v.Default = nil - v.StatusCodeResponses = nil - } - } - - // conversion has no way to discern empty statusCodeResponses from - // nil, since "default" is always included in the map. - // So avoid empty responses list - if len(v.StatusCodeResponses) == 0 { - v.StatusCodeResponses = nil - } - }, - func(v *Operation, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v != nil { - // force non-nil - v.Responses = &Responses{} - c.Fuzz(v.Responses) - - v.Schemes = nil - if c.RandBool() { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. - for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - } - }, - func(v map[int]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Prevent negative numbers - num := c.Intn(4) - for i := 0; i < num+2; i++ { - val := Response{} - c.Fuzz(&val) - - val.Description = c.RandString() + "x" - v[100*(i+1)+c.Intn(100)] = val - } - }, - func(v map[string]PathItem, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - num := c.Intn(5) - for i := 0; i < num+2; i++ { - val := PathItem{} - c.Fuzz(&val) - - // Ref params are only allowed in certain locations, so - // possibly add a few to PathItems - numRefsToAdd := c.Intn(5) - for i := 0; i < numRefsToAdd; i++ { - theRef := Parameter{} - c.Fuzz(&theRef.Refable) - - val.Parameters = append(val.Parameters, theRef) - } - - v["/"+c.RandString()] = val - } - }, - func(v *SchemaOrArray, c fuzz.Continue) { - *v = SchemaOrArray{} - // gnostic parser just doesn't support more - // than one Schema here - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - }, - func(v *SchemaOrBool, c fuzz.Continue) { - *v = SchemaOrBool{} - - if c.RandBool() { - v.Allows = c.RandBool() - } else { - v.Schema = &Schema{} - v.Allows = true - c.Fuzz(&v.Schema) - } - }, - func(v map[string]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Response definitions are not allowed to - // be refs - for i := 0; i < c.Intn(5)+1; i++ { - resp := &Response{} - - c.Fuzz(resp) - resp.Ref = Ref{} - resp.Description = c.RandString() + "x" - - // Response refs are not vendor extensible by gnostic - resp.VendorExtensible.Extensions = nil - v[c.RandString()+"x"] = *resp - } - }, - func(v *Header, c fuzz.Continue) { - if v != nil { - c.FuzzNoCustom(v) - - // descendant Items of Header may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Ref, c fuzz.Continue) { - *v = Ref{} - v.Ref, _ = 
jsonreference.New("http://asd.com/" + c.RandString()) - }, - func(v *Response, c fuzz.Continue) { - *v = Response{} - if c.RandBool() { - v.Ref = Ref{} - v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) - } else { - c.Fuzz(&v.VendorExtensible) - c.Fuzz(&v.Schema) - c.Fuzz(&v.ResponseProps) - - v.Headers = nil - v.Ref = Ref{} - - n := 0 - c.Fuzz(&n) - if n != 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - num := c.Intn(4) - for i := 0; i < num; i++ { - if v.Headers == nil { - v.Headers = make(map[string]Header) - } - hdr := Header{} - c.Fuzz(&hdr) - if hdr.Type == "" { - // hit maxDepth, just abort trying to make haders - v.Headers = nil - break - } - v.Headers[c.RandString()+"x"] = hdr - } - } else { - v.Headers = nil - } - } - - v.Description = c.RandString() + "x" - - // Gnostic parses empty as nil, so to keep avoid putting empty - if len(v.Headers) == 0 { - v.Headers = nil - } - }, - func(v **Info, c fuzz.Continue) { - // Info is never nil - *v = &Info{} - c.FuzzNoCustom(*v) - - (*v).Title = c.RandString() + "x" - }, - func(v *Extensions, c fuzz.Continue) { - // gnostic parser only picks up x- vendor extensions - numChildren := c.Intn(5) - for i := 0; i < numChildren; i++ { - if *v == nil { - *v = Extensions{} - } - (*v)["x-"+c.RandString()] = c.RandString() - } - }, - func(v *Swagger, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v.Paths == nil { - // Force paths non-nil since it does not have omitempty in json tag. - // This means a perfect roundtrip (via json) is impossible, - // since we can't tell the difference between empty/unspecified paths - v.Paths = &Paths{} - c.Fuzz(v.Paths) - } - - v.Swagger = "2.0" - - // Gnostic support serializing ID at all - // unavoidable data loss - v.ID = "" - - v.Schemes = nil - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. 
- for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - }, - func(v *SecurityScheme, c fuzz.Continue) { - v.Description = c.RandString() + "x" - c.Fuzz(&v.VendorExtensible) - - switch c.Intn(3) { - case 0: - v.Type = "basic" - case 1: - v.Type = "apiKey" - switch c.Intn(2) { - case 0: - v.In = "header" - case 1: - v.In = "query" - default: - panic("unreachable") - } - v.Name = "x" + c.RandString() - case 2: - v.Type = "oauth2" - - switch c.Intn(4) { - case 0: - v.Flow = "accessCode" - v.TokenURL = "https://" + c.RandString() - v.AuthorizationURL = "https://" + c.RandString() - case 1: - v.Flow = "application" - v.TokenURL = "https://" + c.RandString() - case 2: - v.Flow = "implicit" - v.AuthorizationURL = "https://" + c.RandString() - case 3: - v.Flow = "password" - v.TokenURL = "https://" + c.RandString() - default: - panic("unreachable") - } - c.Fuzz(&v.Scopes) - default: - panic("unreachable") - } - }, - func(v *interface{}, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *string, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *ExternalDocumentation, c fuzz.Continue) { - v.Description = c.RandString() + "x" - v.URL = c.RandString() + "x" - }, - func(v *SimpleSchema, c fuzz.Continue) { - c.FuzzNoCustom(v) - - switch c.Intn(5) { - case 0: - v.Type = "string" - case 1: - v.Type = "number" - case 2: - v.Type = "boolean" - case 3: - v.Type = "integer" - case 4: - v.Type = "array" - default: - panic("unreachable") - } - - switch c.Intn(5) { - case 0: - v.CollectionFormat = "csv" - case 1: - v.CollectionFormat = "ssv" - case 2: - v.CollectionFormat = "tsv" - case 3: - v.CollectionFormat = "pipes" - case 4: - v.CollectionFormat = "" - default: - panic("unreachable") - } - - // None of the types which include SimpleSchema in our definitions - // actually support "example" in the official spec - v.Example = nil - - // unsupported by openapi - v.Nullable = false - }, - func(v *int64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0 { - *v = 1 - } - }, - func(v *float64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0.0 { - *v = 1.0 - } - }, - func(v *Parameter, c fuzz.Continue) { - if v == nil { - return - } - c.Fuzz(&v.VendorExtensible) - if c.RandBool() { - // body param - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - v.In = "body" - c.Fuzz(&v.Description) - c.Fuzz(&v.Required) - - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - } else { - c.Fuzz(&v.SimpleSchema) - c.Fuzz(&v.CommonValidations) - v.AllowEmptyValue = false - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - - switch c.Intn(4) { - case 0: - // Header param - v.In = "header" - case 1: - // Form data param - v.In = "formData" - v.AllowEmptyValue = c.RandBool() - case 2: - // Query param - v.In = "query" - v.AllowEmptyValue = c.RandBool() - case 3: - // Path param - v.In = "path" - v.Required = true - default: - panic("unreachable") - } - - // descendant Items of Parameter may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Schema, c fuzz.Continue) { - if c.RandBool() { - // file schema - c.Fuzz(&v.Default) - c.Fuzz(&v.Description) - c.Fuzz(&v.Example) - c.Fuzz(&v.ExternalDocs) - - 
c.Fuzz(&v.Format) - c.Fuzz(&v.ReadOnly) - c.Fuzz(&v.Required) - c.Fuzz(&v.Title) - v.Type = StringOrArray{"file"} - - } else { - // normal schema - c.Fuzz(&v.SchemaProps) - c.Fuzz(&v.SwaggerSchemaProps) - c.Fuzz(&v.VendorExtensible) - // c.Fuzz(&v.ExtraProps) - // ExtraProps will not roundtrip - gnostic throws out - // unrecognized keys - } - - // Not supported by official openapi v2 spec - // and stripped by k8s apiserver - v.ID = "" - v.AnyOf = nil - v.OneOf = nil - v.Not = nil - v.Nullable = false - v.AdditionalItems = nil - v.Schema = "" - v.PatternProperties = nil - v.Definitions = nil - v.Dependencies = nil - }, -} - -var SwaggerDiffOptions = []cmp.Option{ - // cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields - cmp.Comparer(func(a Ref, b Ref) bool { - return a.String() == b.String() - }), -} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go index 406a09d9..6a77f2ac 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go @@ -21,7 +21,7 @@ import ( "strconv" "github.com/go-openapi/jsonreference" - openapi_v2 "github.com/google/gnostic/openapiv2" + openapi_v2 "github.com/google/gnostic-models/openapiv2" ) // Interfaces diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go index 9a255630..05310c46 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go @@ -43,6 +43,9 @@ type Header struct { // MarshalJSON marshal this to JSON func (h Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.CommonValidations) if err != nil { return nil, err @@ -62,6 +65,20 @@ func (h Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4), nil } +func (h Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Extensions + HeaderProps + } + x.CommonValidations = commonValidationsOmitZero(h.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(h.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = h.HeaderProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals this header from JSON func (h *Header) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -94,12 +111,8 @@ func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Dec h.CommonValidations = x.CommonValidations h.SimpleSchema = x.SimpleSchema - h.Extensions = x.Extensions + h.Extensions = internal.SanitizeExtensions(x.Extensions) h.HeaderProps = x.HeaderProps - h.Extensions.sanitize() - if len(h.Extensions) == 0 { - h.Extensions = nil - } return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go index 395ececa..d667b705 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go @@ -89,17 +89,9 @@ func (e Extensions) GetObject(key string, out interface{}) error { return nil } -func (e Extensions) sanitize() { - for k := range e { - if !isExtensionKey(k) { - delete(e, k) - } - } -} - func (e 
Extensions) sanitizeWithExtra() (extra map[string]any) { for k, v := range e { - if !isExtensionKey(k) { + if !internal.IsExtensionKey(k) { if extra == nil { extra = make(map[string]any) } @@ -110,10 +102,6 @@ func (e Extensions) sanitizeWithExtra() (extra map[string]any) { return extra } -func isExtensionKey(k string) bool { - return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' -} - // VendorExtensible composition block. type VendorExtensible struct { Extensions Extensions @@ -181,6 +169,9 @@ type Info struct { // MarshalJSON marshal this to JSON func (i Info) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.InfoProps) if err != nil { return nil, err @@ -192,6 +183,16 @@ func (i Info) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (i Info) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + InfoProps + } + x.Extensions = i.Extensions + x.InfoProps = i.InfoProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (i *Info) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -212,11 +213,7 @@ func (i *Info) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decod if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - i.VendorExtensible.Extensions = x.Extensions + i.Extensions = internal.SanitizeExtensions(x.Extensions) i.InfoProps = x.InfoProps return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go index 374f90d2..4132467d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go @@ -37,6 +37,18 @@ type SimpleSchema struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type simpleSchemaOmitZero struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitzero"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + // CommonValidations describe common JSON-schema validations type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` @@ -53,6 +65,23 @@ type CommonValidations struct { Enum []interface{} `json:"enum,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type commonValidationsOmitZero struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + // Items a limited subset of JSON-Schema's items object. 
// It is used by parameter definitions that are not located in "body". // @@ -105,18 +134,18 @@ func (i *Items) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco if err := i.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } + i.CommonValidations = x.CommonValidations i.SimpleSchema = x.SimpleSchema - i.VendorExtensible.Extensions = x.Extensions + i.Extensions = internal.SanitizeExtensions(x.Extensions) return nil } // MarshalJSON converts this items object to JSON func (i Items) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.CommonValidations) if err != nil { return nil, err @@ -135,3 +164,17 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b4, b3, b1, b2), nil } + +func (i Items) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(i.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(i.SimpleSchema) + x.Ref = i.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(i.Extensions) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go index 923769ae..63eed346 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go @@ -42,6 +42,23 @@ type OperationProps struct { Responses *Responses `json:"responses,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type operationPropsOmitZero struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty,omitzero"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitzero"` +} + // MarshalJSON takes care of serializing operation properties to JSON // // We use a custom marhaller here to handle a special cases related to @@ -96,17 +113,16 @@ func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - o.VendorExtensible.Extensions = x.Extensions + o.Extensions = internal.SanitizeExtensions(x.Extensions) o.OperationProps = OperationProps(x.OperationPropsNoMethods) return nil } // MarshalJSON converts this items object to JSON func (o Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -118,3 +134,13 @@ func (o Operation) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (o Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go index 7cb229ac..53d1e0aa 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go @@ -36,6 +36,17 @@ type ParamProps struct { AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type paramPropsOmitZero struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitzero"` + Schema *Schema `json:"schema,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` +} + // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. @@ -109,19 +120,18 @@ func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } p.CommonValidations = x.CommonValidations p.SimpleSchema = x.SimpleSchema - p.VendorExtensible.Extensions = x.Extensions + p.Extensions = internal.SanitizeExtensions(x.Extensions) p.ParamProps = x.ParamProps return nil } // MarshalJSON converts this items object to JSON func (p Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.CommonValidations) if err != nil { return nil, err @@ -144,3 +154,19 @@ func (p Parameter) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2, b4, b5), nil } + +func (p Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + ParamProps paramPropsOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(p.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(p.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParamProps = paramPropsOmitZero(p.ParamProps) + x.Ref = p.Refable.Ref.String() + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go index 03741fcf..1d1588cb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go @@ -70,24 +70,20 @@ func (p *PathItem) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - - p.Extensions = x.Extensions - p.PathItemProps = x.PathItemProps - - if err := p.Refable.Ref.fromMap(p.Extensions); err != nil { + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - - p.Extensions.sanitize() - if len(p.Extensions) == 0 { - p.Extensions = nil - } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathItemProps = x.PathItemProps return nil } // MarshalJSON converts this items object to JSON func (p PathItem) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b3, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -103,3 +99,15 @@ func (p PathItem) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b3, b4, b5) return concated, nil } + +func (p PathItem) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + PathItemProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathItemProps = p.PathItemProps + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go index 7c63d440..18f6a9f4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go @@ -92,7 +92,7 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco } switch k := tok.String(); { - case isExtensionKey(k): + case internal.IsExtensionKey(k): ext = nil if err := opts.UnmarshalNext(dec, &ext); err != nil { return err @@ 
-114,7 +114,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco p.Paths[k] = pi default: _, err := dec.ReadValue() // skip value - return err + if err != nil { + return err + } } } default: @@ -124,6 +126,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco // MarshalJSON converts this items object to JSON func (p Paths) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err @@ -142,3 +147,18 @@ func (p Paths) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (p Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go index 1405bfd8..775b3b0c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go @@ -21,6 +21,8 @@ import ( "path/filepath" "github.com/go-openapi/jsonreference" + + "k8s.io/kube-openapi/pkg/internal" ) // Refable is a struct for things that accept a $ref property @@ -149,19 +151,5 @@ func (r *Ref) UnmarshalJSON(d []byte) error { } func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil + return internal.JSONRefFromMap(&r.Ref, v) } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go index f01364b7..3ff1fe13 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go @@ -30,6 +30,15 @@ type ResponseProps struct { Examples map[string]interface{} `json:"examples,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type responsePropsOmitZero struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitzero"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + // Response describes a single response from an API Operation. 
// // For more information: http://goo.gl/8us55a#responseObject @@ -68,23 +77,20 @@ func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D return err } - r.Extensions = x.Extensions - r.ResponseProps = x.ResponseProps - - if err := r.Refable.Ref.fromMap(r.Extensions); err != nil { + if err := r.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - - r.Extensions.sanitize() - if len(r.Extensions) == 0 { - r.Extensions = nil - } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps return nil } // MarshalJSON converts this items object to JSON func (r Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponseProps) if err != nil { return nil, err @@ -100,6 +106,18 @@ func (r Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + ResponseProps responsePropsOmitZero `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = responsePropsOmitZero(r.ResponseProps) + return opts.MarshalNext(enc, x) +} + // NewResponse creates a new response instance func NewResponse() *Response { return new(Response) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go index c3fa6819..d9ad760a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go @@ -63,6 +63,9 @@ func (r *Responses) UnmarshalJSON(data []byte) error { // MarshalJSON converts this items object to JSON func (r Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -75,6 +78,25 @@ func (r Responses) MarshalJSON() ([]byte, error) { return concated, nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + // ResponsesProps describes all responses for an operation. // It tells what is the default response and maps all responses with a // HTTP status code. @@ -148,7 +170,7 @@ func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
return nil } switch k := tok.String(); { - case isExtensionKey(k): + case internal.IsExtensionKey(k): ext = nil if err := opts.UnmarshalNext(dec, &ext); err != nil { return err diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go index 9add0c16..dfbb2e05 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go @@ -196,6 +196,46 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type schemaPropsOmitZero struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitzero"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitzero"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitzero"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitzero"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitzero"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitzero"` + Definitions Definitions `json:"definitions,omitempty"` +} + // SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` @@ -204,6 +244,15 @@ type SwaggerSchemaProps struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type swaggerSchemaPropsOmitZero struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitzero"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + Example interface{} `json:"example,omitempty"` +} + // Schema the schema object allows the definition of input and output data types. // These types can be objects, but also primitives and arrays. 
// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) @@ -434,6 +483,9 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema { // MarshalJSON marshal this to JSON func (s Schema) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SchemaProps) if err != nil { return nil, fmt.Errorf("schema props %v", err) @@ -465,6 +517,31 @@ func (s Schema) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil } +func (s Schema) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + SchemaProps schemaPropsOmitZero `json:",inline"` + SwaggerSchemaProps swaggerSchemaPropsOmitZero `json:",inline"` + Schema string `json:"$schema,omitempty"` + Ref string `json:"$ref,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(s.Extensions)+len(s.ExtraProps)) + for k, v := range s.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range s.ExtraProps { + x.ArbitraryKeys[k] = v + } + x.SchemaProps = schemaPropsOmitZero(s.SchemaProps) + x.SwaggerSchemaProps = swaggerSchemaPropsOmitZero(s.SwaggerSchemaProps) + x.Ref = s.Ref.String() + x.Schema = string(s.Schema) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *Schema) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -547,7 +624,7 @@ func (s *Schema) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Dec } s.ExtraProps = x.Extensions.sanitizeWithExtra() - s.VendorExtensible.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SchemaProps = x.SchemaProps s.SwaggerSchemaProps = x.SwaggerSchemaProps return nil diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go index 34723fb7..e2b7da14 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go @@ -18,6 +18,7 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) @@ -45,6 +46,9 @@ type SecurityScheme struct { // MarshalJSON marshal this to JSON func (s SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -56,6 +60,16 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { @@ -72,11 +86,7 @@ func (s *SecurityScheme) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *js if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - 
s.VendorExtensible.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SecuritySchemeProps = x.SecuritySchemeProps return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go index f6cb7da3..c8f3beaa 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go @@ -35,6 +35,9 @@ type Swagger struct { // MarshalJSON marshals this swagger structure to json func (s Swagger) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SwaggerProps) if err != nil { return nil, err @@ -46,12 +49,22 @@ func (s Swagger) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SwaggerProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SwaggerProps = s.SwaggerProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals a swagger spec from json func (s *Swagger) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { return jsonv2.Unmarshal(data, s) } - var sw Swagger if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { return err @@ -75,15 +88,8 @@ func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.De if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - - s.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SwaggerProps = x.SwaggerProps - - s.Extensions.sanitize() - if len(s.Extensions) == 0 { - s.Extensions = nil - } - return nil } @@ -126,6 +132,9 @@ var jsFalse = []byte("false") // MarshalJSON convert this object to JSON func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if s.Schema != nil { return json.Marshal(s.Schema) } @@ -136,6 +145,18 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { return jsTrue, nil } +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + + if s.Schema == nil && !s.Allows { + return enc.WriteToken(jsonv2.False) + } + return enc.WriteToken(jsonv2.True) +} + // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -143,15 +164,15 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { } var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch + if len(data) > 0 && data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Schema = &sch + nw.Allows = true + } else { + json.Unmarshal(data, &nw.Allows) } *s = nw return nil @@ -185,6 +206,9 @@ type SchemaOrStringArray struct { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if 
internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if len(s.Property) > 0 { return json.Marshal(s.Property) } @@ -194,6 +218,17 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { return []byte("null"), nil } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if len(s.Property) > 0 { + return opts.MarshalNext(enc, s.Property) + } + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + return enc.WriteToken(jsonv2.Null) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -347,12 +382,23 @@ func (s *SchemaOrArray) ContainsType(name string) bool { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + if s.Schemas != nil { return json.Marshal(s.Schemas) } return json.Marshal(s.Schema) } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schemas != nil { + return opts.MarshalNext(enc, s.Schemas) + } + return opts.MarshalNext(enc, s.Schema) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go index 69e93b60..d105d52c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go @@ -41,6 +41,9 @@ type Tag struct { // MarshalJSON marshal this to JSON func (t Tag) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(t) + } b1, err := json.Marshal(t.TagProps) if err != nil { return nil, err @@ -52,6 +55,16 @@ func (t Tag) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (t Tag) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + TagProps + } + x.Extensions = internal.SanitizeExtensions(t.Extensions) + x.TagProps = t.TagProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (t *Tag) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -72,11 +85,7 @@ func (t *Tag) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decode if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - t.VendorExtensible.Extensions = x.Extensions + t.Extensions = internal.SanitizeExtensions(x.Extensions) t.TagProps = x.TagProps return nil } diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go index e4e740ca..e0811e83 100644 --- a/vendor/k8s.io/utils/integer/integer.go +++ b/vendor/k8s.io/utils/integer/integer.go @@ -16,6 +16,8 @@ limitations under the License. 
package integer +import "math" + // IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { @@ -65,9 +67,7 @@ func Int64Min(a, b int64) int64 { } // RoundToInt32 rounds floats into integer numbers. +// Deprecated: use math.Round() and a cast directly. func RoundToInt32(a float64) int32 { - if a < 0 { - return int32(a - 0.5) - } - return int32(a + 0.5) + return int32(math.Round(a)) } diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 00000000..7cb7795b --- /dev/null +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections, from child listeners to parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener +var _ net.Listener = &multiListener{} + +// MultiListen returns net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listener and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g, localhost) and it will create a listener for at +// most one of the host's IP. +func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) +} + +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing +// mocking for unit-testing. 
+func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. + conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. + terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. + if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listener. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections. + for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener. 
+func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. +func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index b8103223..b673a642 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -14,12 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Deprecated: Use functions in k8s.io/utils/ptr instead: ptr.To to obtain +// a pointer, ptr.Deref to dereference a pointer, ptr.Equal to compare +// dereferenced pointers. package pointer import ( - "fmt" - "reflect" "time" + + "k8s.io/utils/ptr" ) // AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, @@ -28,383 +31,219 @@ import ( // // This function is only valid for structs and pointers to structs. Any other // type will cause a panic. Passing a typed nil pointer will return true. -func AllPtrFieldsNil(obj interface{}) bool { - v := reflect.ValueOf(obj) - if !v.IsValid() { - panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return true - } - v = v.Elem() - } - for i := 0; i < v.NumField(); i++ { - if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { - return false - } - } - return true -} - -// Int returns a pointer to an int -func Int(i int) *int { - return &i -} +// +// Deprecated: Use ptr.AllPtrFieldsNil instead. +var AllPtrFieldsNil = ptr.AllPtrFieldsNil + +// Int returns a pointer to an int. +var Int = ptr.To[int] // IntPtr is a function variable referring to Int. // -// Deprecated: Use Int instead. +// Deprecated: Use ptr.To instead. var IntPtr = Int // for back-compat // IntDeref dereferences the int ptr and returns it if not nil, or else // returns def. -func IntDeref(ptr *int, def int) int { - if ptr != nil { - return *ptr - } - return def -} +var IntDeref = ptr.Deref[int] // IntPtrDerefOr is a function variable referring to IntDeref. // -// Deprecated: Use IntDeref instead. +// Deprecated: Use ptr.Deref instead. var IntPtrDerefOr = IntDeref // for back-compat // Int32 returns a pointer to an int32. -func Int32(i int32) *int32 { - return &i -} +var Int32 = ptr.To[int32] // Int32Ptr is a function variable referring to Int32. // -// Deprecated: Use Int32 instead. +// Deprecated: Use ptr.To instead. var Int32Ptr = Int32 // for back-compat // Int32Deref dereferences the int32 ptr and returns it if not nil, or else // returns def. -func Int32Deref(ptr *int32, def int32) int32 { - if ptr != nil { - return *ptr - } - return def -} +var Int32Deref = ptr.Deref[int32] // Int32PtrDerefOr is a function variable referring to Int32Deref. // -// Deprecated: Use Int32Deref instead. +// Deprecated: Use ptr.Deref instead. var Int32PtrDerefOr = Int32Deref // for back-compat // Int32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Int32Equal(a, b *int32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Int32Equal = ptr.Equal[int32] // Uint returns a pointer to an uint -func Uint(i uint) *uint { - return &i -} +var Uint = ptr.To[uint] // UintPtr is a function variable referring to Uint. 
// -// Deprecated: Use Uint instead. +// Deprecated: Use ptr.To instead. var UintPtr = Uint // for back-compat // UintDeref dereferences the uint ptr and returns it if not nil, or else // returns def. -func UintDeref(ptr *uint, def uint) uint { - if ptr != nil { - return *ptr - } - return def -} +var UintDeref = ptr.Deref[uint] // UintPtrDerefOr is a function variable referring to UintDeref. // -// Deprecated: Use UintDeref instead. +// Deprecated: Use ptr.Deref instead. var UintPtrDerefOr = UintDeref // for back-compat // Uint32 returns a pointer to an uint32. -func Uint32(i uint32) *uint32 { - return &i -} +var Uint32 = ptr.To[uint32] // Uint32Ptr is a function variable referring to Uint32. // -// Deprecated: Use Uint32 instead. +// Deprecated: Use ptr.To instead. var Uint32Ptr = Uint32 // for back-compat // Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else // returns def. -func Uint32Deref(ptr *uint32, def uint32) uint32 { - if ptr != nil { - return *ptr - } - return def -} +var Uint32Deref = ptr.Deref[uint32] // Uint32PtrDerefOr is a function variable referring to Uint32Deref. // -// Deprecated: Use Uint32Deref instead. +// Deprecated: Use ptr.Deref instead. var Uint32PtrDerefOr = Uint32Deref // for back-compat // Uint32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Uint32Equal(a, b *uint32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Uint32Equal = ptr.Equal[uint32] // Int64 returns a pointer to an int64. -func Int64(i int64) *int64 { - return &i -} +var Int64 = ptr.To[int64] // Int64Ptr is a function variable referring to Int64. // -// Deprecated: Use Int64 instead. +// Deprecated: Use ptr.To instead. var Int64Ptr = Int64 // for back-compat // Int64Deref dereferences the int64 ptr and returns it if not nil, or else // returns def. -func Int64Deref(ptr *int64, def int64) int64 { - if ptr != nil { - return *ptr - } - return def -} +var Int64Deref = ptr.Deref[int64] // Int64PtrDerefOr is a function variable referring to Int64Deref. // -// Deprecated: Use Int64Deref instead. +// Deprecated: Use ptr.Deref instead. var Int64PtrDerefOr = Int64Deref // for back-compat // Int64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Int64Equal(a, b *int64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Int64Equal = ptr.Equal[int64] // Uint64 returns a pointer to an uint64. -func Uint64(i uint64) *uint64 { - return &i -} +var Uint64 = ptr.To[uint64] // Uint64Ptr is a function variable referring to Uint64. // -// Deprecated: Use Uint64 instead. +// Deprecated: Use ptr.To instead. var Uint64Ptr = Uint64 // for back-compat // Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else // returns def. -func Uint64Deref(ptr *uint64, def uint64) uint64 { - if ptr != nil { - return *ptr - } - return def -} +var Uint64Deref = ptr.Deref[uint64] // Uint64PtrDerefOr is a function variable referring to Uint64Deref. // -// Deprecated: Use Uint64Deref instead. +// Deprecated: Use ptr.Deref instead. var Uint64PtrDerefOr = Uint64Deref // for back-compat // Uint64Equal returns true if both arguments are nil or both arguments // dereference to the same value. 
-func Uint64Equal(a, b *uint64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Uint64Equal = ptr.Equal[uint64] // Bool returns a pointer to a bool. -func Bool(b bool) *bool { - return &b -} +var Bool = ptr.To[bool] // BoolPtr is a function variable referring to Bool. // -// Deprecated: Use Bool instead. +// Deprecated: Use ptr.To instead. var BoolPtr = Bool // for back-compat // BoolDeref dereferences the bool ptr and returns it if not nil, or else // returns def. -func BoolDeref(ptr *bool, def bool) bool { - if ptr != nil { - return *ptr - } - return def -} +var BoolDeref = ptr.Deref[bool] // BoolPtrDerefOr is a function variable referring to BoolDeref. // -// Deprecated: Use BoolDeref instead. +// Deprecated: Use ptr.Deref instead. var BoolPtrDerefOr = BoolDeref // for back-compat // BoolEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func BoolEqual(a, b *bool) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var BoolEqual = ptr.Equal[bool] // String returns a pointer to a string. -func String(s string) *string { - return &s -} +var String = ptr.To[string] // StringPtr is a function variable referring to String. // -// Deprecated: Use String instead. +// Deprecated: Use ptr.To instead. var StringPtr = String // for back-compat // StringDeref dereferences the string ptr and returns it if not nil, or else // returns def. -func StringDeref(ptr *string, def string) string { - if ptr != nil { - return *ptr - } - return def -} +var StringDeref = ptr.Deref[string] // StringPtrDerefOr is a function variable referring to StringDeref. // -// Deprecated: Use StringDeref instead. +// Deprecated: Use ptr.Deref instead. var StringPtrDerefOr = StringDeref // for back-compat // StringEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func StringEqual(a, b *string) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var StringEqual = ptr.Equal[string] // Float32 returns a pointer to a float32. -func Float32(i float32) *float32 { - return &i -} +var Float32 = ptr.To[float32] // Float32Ptr is a function variable referring to Float32. // -// Deprecated: Use Float32 instead. +// Deprecated: Use ptr.To instead. var Float32Ptr = Float32 // Float32Deref dereferences the float32 ptr and returns it if not nil, or else // returns def. -func Float32Deref(ptr *float32, def float32) float32 { - if ptr != nil { - return *ptr - } - return def -} +var Float32Deref = ptr.Deref[float32] // Float32PtrDerefOr is a function variable referring to Float32Deref. // -// Deprecated: Use Float32Deref instead. +// Deprecated: Use ptr.Deref instead. var Float32PtrDerefOr = Float32Deref // for back-compat // Float32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Float32Equal(a, b *float32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Float32Equal = ptr.Equal[float32] // Float64 returns a pointer to a float64. -func Float64(i float64) *float64 { - return &i -} +var Float64 = ptr.To[float64] // Float64Ptr is a function variable referring to Float64. // -// Deprecated: Use Float64 instead. +// Deprecated: Use ptr.To instead. 
var Float64Ptr = Float64 // Float64Deref dereferences the float64 ptr and returns it if not nil, or else // returns def. -func Float64Deref(ptr *float64, def float64) float64 { - if ptr != nil { - return *ptr - } - return def -} +var Float64Deref = ptr.Deref[float64] // Float64PtrDerefOr is a function variable referring to Float64Deref. // -// Deprecated: Use Float64Deref instead. +// Deprecated: Use ptr.Deref instead. var Float64PtrDerefOr = Float64Deref // for back-compat // Float64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Float64Equal(a, b *float64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Float64Equal = ptr.Equal[float64] // Duration returns a pointer to a time.Duration. -func Duration(d time.Duration) *time.Duration { - return &d -} +var Duration = ptr.To[time.Duration] // DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else // returns def. -func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { - if ptr != nil { - return *ptr - } - return def -} +var DurationDeref = ptr.Deref[time.Duration] // DurationEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func DurationEqual(a, b *time.Duration) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var DurationEqual = ptr.Equal[time.Duration] diff --git a/vendor/k8s.io/utils/ptr/OWNERS b/vendor/k8s.io/utils/ptr/OWNERS new file mode 100644 index 00000000..0d639275 --- /dev/null +++ b/vendor/k8s.io/utils/ptr/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/ptr/README.md b/vendor/k8s.io/utils/ptr/README.md new file mode 100644 index 00000000..2ca8073d --- /dev/null +++ b/vendor/k8s.io/utils/ptr/README.md @@ -0,0 +1,3 @@ +# Pointer + +This package provides some functions for pointer-based operations. diff --git a/vendor/k8s.io/utils/ptr/ptr.go b/vendor/k8s.io/utils/ptr/ptr.go new file mode 100644 index 00000000..659ed3b9 --- /dev/null +++ b/vendor/k8s.io/utils/ptr/ptr.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. 
+func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// To returns a pointer to the given value. +func To[T any](v T) *T { + return &v +} + +// Deref dereferences ptr and returns the value it points to if no nil, or else +// returns def. +func Deref[T any](ptr *T, def T) T { + if ptr != nil { + return *ptr + } + return def +} + +// Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Equal[T comparable](a, b *T) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index a0b07a6d..559aebb5 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -65,6 +65,11 @@ func durationToMilliseconds(timeDuration time.Duration) int64 { } type traceItem interface { + // rLock must be called before invoking time or writeItem. + rLock() + // rUnlock must be called after processing the item is complete. + rUnlock() + // time returns when the trace was recorded as completed. time() time.Time // writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the @@ -79,6 +84,10 @@ type traceStep struct { fields []Field } +// rLock doesn't need to do anything because traceStep instances are immutable. +func (s traceStep) rLock() {} +func (s traceStep) rUnlock() {} + func (s traceStep) time() time.Time { return s.stepTime } @@ -106,6 +115,14 @@ type Trace struct { traceItems []traceItem } +func (t *Trace) rLock() { + t.lock.RLock() +} + +func (t *Trace) rUnlock() { + t.lock.RUnlock() +} + func (t *Trace) time() time.Time { if t.endTime != nil { return *t.endTime @@ -175,7 +192,7 @@ func (t *Trace) Log() { t.endTime = &endTime t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level - if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace + if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() } } @@ -231,8 +248,10 @@ func (t *Trace) logTrace() { func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) { lastStepTime := t.startTime for _, stepOrTrace := range t.traceItems { + stepOrTrace.rLock() stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold) lastStepTime = stepOrTrace.time() + stepOrTrace.rUnlock() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 1c0d7ee9..e02f74ef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -81,7 +81,7 @@ github.com/docker/go-connections/tlsconfig # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/emicklei/go-restful/v3 v3.9.0 +# github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -102,7 +102,10 @@ github.com/felixge/httpsnoop # github.com/fsnotify/fsnotify v1.7.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify -# github.com/go-logr/logr v1.3.0 +# github.com/fxamacker/cbor/v2 v2.7.0 +## explicit; go 1.17 
+github.com/fxamacker/cbor/v2 +# github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -173,9 +176,9 @@ github.com/go-swagger/go-swagger/cmd/swagger/commands/initcmd github.com/go-swagger/go-swagger/codescan github.com/go-swagger/go-swagger/generator github.com/go-swagger/go-swagger/scan -# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 -## explicit; go 1.13 -github.com/go-task/slim-sprig +# github.com/go-task/slim-sprig/v3 v3.0.0 +## explicit; go 1.20 +github.com/go-task/slim-sprig/v3 # github.com/gobuffalo/flect v0.2.5 ## explicit; go 1.13 github.com/gobuffalo/flect @@ -191,8 +194,8 @@ github.com/golang/groupcache/lru # github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock -# github.com/golang/protobuf v1.5.3 -## explicit; go 1.9 +# github.com/golang/protobuf v1.5.4 +## explicit; go 1.17 github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes @@ -227,7 +230,13 @@ github.com/google/gnostic/compiler github.com/google/gnostic/extensions github.com/google/gnostic/jsonschema github.com/google/gnostic/openapiv2 -github.com/google/gnostic/openapiv3 +# github.com/google/gnostic-models v0.6.8 +## explicit; go 1.18 +github.com/google/gnostic-models/compiler +github.com/google/gnostic-models/extensions +github.com/google/gnostic-models/jsonschema +github.com/google/gnostic-models/openapiv2 +github.com/google/gnostic-models/openapiv3 # github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 github.com/google/go-cmp/cmp @@ -244,10 +253,10 @@ github.com/google/gofuzz/bytesource github.com/google/gops/agent github.com/google/gops/internal github.com/google/gops/signal -# github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 -## explicit; go 1.18 +# github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af +## explicit; go 1.19 github.com/google/pprof/profile -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/gorilla/handlers v1.5.1 @@ -352,8 +361,8 @@ github.com/munnerz/goautoneg # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/onsi/ginkgo/v2 v2.13.2 -## explicit; go 1.18 +# github.com/onsi/ginkgo/v2 v2.19.0 +## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -374,8 +383,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.29.0 -## explicit; go 1.18 +# github.com/onsi/gomega v1.33.1 +## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -447,8 +456,8 @@ github.com/pyroscope-io/godeltaprof/internal/pprof # github.com/robfig/cron v1.2.0 ## explicit github.com/robfig/cron -# github.com/rogpeppe/go-internal v1.11.0 -## explicit; go 1.19 +# github.com/rogpeppe/go-internal v1.12.0 +## explicit; go 1.20 github.com/rogpeppe/go-internal/fmtsort # github.com/sagikazarmark/locafero v0.4.0 ## explicit; go 1.20 @@ -516,6 +525,9 @@ github.com/tklauser/numcpus # github.com/toqueteos/webbrowser v1.2.0 ## explicit; go 1.12 github.com/toqueteos/webbrowser +# github.com/x448/float16 v0.8.4 +## explicit; go 1.11 +github.com/x448/float16 # github.com/yusufpapurcu/wmi v1.2.2 ## explicit; go 1.16 github.com/yusufpapurcu/wmi @@ -635,7 +647,7 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool 
go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.19.0 +# golang.org/x/crypto v0.24.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -654,12 +666,12 @@ golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/mod v0.14.0 +# golang.org/x/mod v0.17.0 ## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.21.0 +# golang.org/x/net v0.26.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -684,22 +696,23 @@ golang.org/x/net/websocket ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.5.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 +golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.17.0 +# golang.org/x/sys v0.21.0 ## explicit; go 1.18 golang.org/x/sys/cpu -golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.17.0 +# golang.org/x/term v0.21.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.14.0 +# golang.org/x/text v0.16.0 ## explicit; go 1.18 +golang.org/x/text/cases golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -710,6 +723,7 @@ golang.org/x/text/encoding/korean golang.org/x/text/encoding/simplifiedchinese golang.org/x/text/encoding/traditionalchinese golang.org/x/text/encoding/unicode +golang.org/x/text/internal golang.org/x/text/internal/language golang.org/x/text/internal/language/compact golang.org/x/text/internal/tag @@ -724,9 +738,10 @@ golang.org/x/text/width # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.15.0 -## explicit; go 1.18 +# golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d +## explicit; go 1.19 golang.org/x/tools/cmd/stringer +golang.org/x/tools/cover golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/buildutil @@ -737,20 +752,21 @@ golang.org/x/tools/go/loader golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports +golang.org/x/tools/internal/aliases golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label -golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/tokeninternal -golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/versions # gomodules.xyz/jsonpatch/v2 v2.2.0 ## explicit; go 1.12 gomodules.xyz/jsonpatch/v2 @@ -837,14 +853,16 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.31.0 -## explicit; go 1.11 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand 
+google.golang.org/protobuf/internal/editiondefaults +google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset @@ -869,6 +887,7 @@ google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/dynamicpb +google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb @@ -951,8 +970,8 @@ k8s.io/api/storage/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 -# k8s.io/apimachinery v0.26.3 -## explicit; go 1.19 +# k8s.io/apimachinery v0.31.1 +## explicit; go 1.22 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -975,6 +994,8 @@ k8s.io/apimachinery/pkg/labels k8s.io/apimachinery/pkg/runtime k8s.io/apimachinery/pkg/runtime/schema k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct +k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/runtime/serializer/protobuf k8s.io/apimachinery/pkg/runtime/serializer/recognizer @@ -984,12 +1005,14 @@ k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types k8s.io/apimachinery/pkg/util/cache k8s.io/apimachinery/pkg/util/diff +k8s.io/apimachinery/pkg/util/dump k8s.io/apimachinery/pkg/util/duration k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/managedfields +k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net @@ -1396,38 +1419,39 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.26.3 -## explicit; go 1.19 +# k8s.io/code-generator v0.31.1 +## explicit; go 1.22 k8s.io/code-generator +k8s.io/code-generator/cmd/applyconfiguration-gen +k8s.io/code-generator/cmd/applyconfiguration-gen/args +k8s.io/code-generator/cmd/applyconfiguration-gen/generators k8s.io/code-generator/cmd/client-gen k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/generators k8s.io/code-generator/cmd/client-gen/generators/fake k8s.io/code-generator/cmd/client-gen/generators/scheme k8s.io/code-generator/cmd/client-gen/generators/util -k8s.io/code-generator/cmd/client-gen/path k8s.io/code-generator/cmd/client-gen/types k8s.io/code-generator/cmd/conversion-gen k8s.io/code-generator/cmd/conversion-gen/args k8s.io/code-generator/cmd/conversion-gen/generators k8s.io/code-generator/cmd/deepcopy-gen k8s.io/code-generator/cmd/deepcopy-gen/args +k8s.io/code-generator/cmd/deepcopy-gen/generators k8s.io/code-generator/cmd/defaulter-gen k8s.io/code-generator/cmd/defaulter-gen/args +k8s.io/code-generator/cmd/defaulter-gen/generators k8s.io/code-generator/cmd/go-to-protobuf k8s.io/code-generator/cmd/go-to-protobuf/protobuf -k8s.io/code-generator/cmd/import-boss k8s.io/code-generator/cmd/informer-gen k8s.io/code-generator/cmd/informer-gen/args 
k8s.io/code-generator/cmd/informer-gen/generators k8s.io/code-generator/cmd/lister-gen k8s.io/code-generator/cmd/lister-gen/args k8s.io/code-generator/cmd/lister-gen/generators -k8s.io/code-generator/cmd/openapi-gen k8s.io/code-generator/cmd/register-gen k8s.io/code-generator/cmd/register-gen/args k8s.io/code-generator/cmd/register-gen/generators -k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect @@ -1451,36 +1475,34 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/gengo v0.0.0-20220902162205-c0856e24416d -## explicit; go 1.13 -k8s.io/gengo/args -k8s.io/gengo/examples/deepcopy-gen/generators -k8s.io/gengo/examples/defaulter-gen/generators -k8s.io/gengo/examples/import-boss/generators -k8s.io/gengo/examples/set-gen/generators -k8s.io/gengo/examples/set-gen/sets -k8s.io/gengo/generator -k8s.io/gengo/namer -k8s.io/gengo/parser -k8s.io/gengo/types -# k8s.io/klog/v2 v2.90.1 -## explicit; go 1.13 +# k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 +## explicit; go 1.20 +k8s.io/gengo/v2 +k8s.io/gengo/v2/generator +k8s.io/gengo/v2/namer +k8s.io/gengo/v2/parser +k8s.io/gengo/v2/types +# k8s.io/klog/v2 v2.130.1 +## explicit; go 1.18 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity +k8s.io/klog/v2/internal/sloghandler # k8s.io/kms v0.26.3 ## explicit; go 1.19 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2alpha1 -# k8s.io/kube-openapi v0.0.0-20221110221610-a28e98eb7c70 -## explicit; go 1.18 +# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +## explicit; go 1.20 +k8s.io/kube-openapi/cmd/openapi-gen k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 k8s.io/kube-openapi/pkg/builder3/util +k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/common/restfuladapter k8s.io/kube-openapi/pkg/generators @@ -1488,9 +1510,7 @@ k8s.io/kube-openapi/pkg/generators/rules k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/internal -k8s.io/kube-openapi/pkg/internal/handler k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json -k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 @@ -1501,7 +1521,7 @@ k8s.io/kube-openapi/pkg/validation/spec # k8s.io/kubectl v0.26.3 ## explicit; go 1.19 k8s.io/kubectl/pkg/util/podutils -# k8s.io/utils v0.0.0-20230209194617-a36077c30491 +# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1513,6 +1533,7 @@ k8s.io/utils/lru k8s.io/utils/net k8s.io/utils/path k8s.io/utils/pointer +k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace # sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 @@ -1582,17 +1603,18 @@ sigs.k8s.io/controller-tools/pkg/schemapatcher sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml sigs.k8s.io/controller-tools/pkg/version sigs.k8s.io/controller-tools/pkg/webhook -# sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 +# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 +# 
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value -# sigs.k8s.io/yaml v1.3.0 +# sigs.k8s.io/yaml v1.4.0 ## explicit; go 1.12 sigs.k8s.io/yaml +sigs.k8s.io/yaml/goyaml.v2 diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index a6c41936..6a13cf2d 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -75,6 +75,8 @@ import ( // either be any string type, an integer, implement json.Unmarshaler, or // implement encoding.TextUnmarshaler. // +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. @@ -85,14 +87,13 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // Check for well-formedness. // Avoids filling out half a data structure diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index 1f5e3e44..5b67251f 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -77,31 +77,31 @@ import ( // // Examples of struct field tags and their meanings: // -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` // -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` // -// // Field is ignored by this package. -// Field int `json:"-"` +// // Field is ignored by this package. +// Field int `json:"-"` // -// // Field appears in JSON as key "-". -// Field int `json:"-,"` +// // Field appears in JSON as key "-". +// Field int `json:"-,"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. 
It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, and ASCII punctuation except quotation @@ -154,7 +154,6 @@ import ( // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an error. -// func Marshal(v any) ([]byte, error) { e := newEncodeState() @@ -784,7 +783,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. - ptr := v.Pointer() + ptr := v.UnsafePointer() if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -877,9 +876,9 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { - ptr uintptr + ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe len int - }{v.Pointer(), v.Len()} + }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go index 9e170127..ab249b2b 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go @@ -24,8 +24,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See https://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go index dbaa821b..22fc6922 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go @@ -27,6 +27,7 @@ func Valid(data []byte) bool { // checkValid verifies that data is valid JSON-encoded data. // scan is passed in for use by checkValid to avoid an allocation. +// checkValid returns nil or a SyntaxError. func checkValid(data []byte, scan *scanner) error { scan.reset() for _, c := range data { @@ -42,6 +43,7 @@ func checkValid(data []byte, scan *scanner) error { } // A SyntaxError is a description of a JSON syntax error. +// Unmarshal will return a SyntaxError if the JSON can't be parsed. 
type SyntaxError struct { msg string // description of error Offset int64 // error occurred after reading Offset bytes diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 6775b4cf..1967755a 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -289,7 +289,6 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token any */ diff --git a/vendor/sigs.k8s.io/json/json.go b/vendor/sigs.k8s.io/json/json.go index d3a42b42..e8f31b16 100644 --- a/vendor/sigs.k8s.io/json/json.go +++ b/vendor/sigs.k8s.io/json/json.go @@ -34,13 +34,13 @@ type Decoder interface { } // NewDecoderCaseSensitivePreserveInts returns a decoder that matches the behavior of encoding/json#NewDecoder, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. +// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { d := internaljson.NewDecoder(r) d.CaseSensitive() @@ -51,13 +51,13 @@ func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { // UnmarshalCaseSensitivePreserveInts parses the JSON-encoded data and stores the result in the value pointed to by v. // // UnmarshalCaseSensitivePreserveInts matches the behavior of encoding/json#Unmarshal, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. 
+// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error { return internaljson.Unmarshal( data, diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go index 9b14ca58..41fc2474 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -28,20 +28,15 @@ import ( // for PathElementSet and SetNodeMap, so we could probably share the // code. type PathElementValueMap struct { - members sortedPathElementValues + valueMap PathElementMap } func MakePathElementValueMap(size int) PathElementValueMap { return PathElementValueMap{ - members: make(sortedPathElementValues, 0, size), + valueMap: MakePathElementMap(size), } } -type pathElementValue struct { - PathElement PathElement - Value value.Value -} - type sortedPathElementValues []pathElementValue // Implement the sort interface; this would permit bulk creation, which would @@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool { func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } // Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + s.valueMap.Insert(pe, v) +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. +func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + v, ok := s.valueMap.Get(pe) + if !ok { + return nil, false + } + return v.(value.Value), true +} + +// PathElementValueMap is a map from PathElement to interface{}. +type PathElementMap struct { + members sortedPathElementValues +} + +type pathElementValue struct { + PathElement PathElement + Value interface{} +} + +func MakePathElementMap(size int) PathElementMap { + return PathElementMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +// Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. +func (s *PathElementMap) Insert(pe PathElement, v interface{}) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) @@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { return } if s.members[loc].PathElement.Equals(pe) { + s.members[loc].Value = v return } s.members = append(s.members, pathElementValue{}) @@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { // Get retrieves the value associated with the given PathElement from the map. 
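The sigs.k8s.io/json doc comments above describe the decoder's two deviations from encoding/json: case-sensitive key matching and integer preservation. A small sketch of the exported entry point, assuming a generic map target (the input literal is illustrative):

```go
package main

import (
	"fmt"

	sigsjson "sigs.k8s.io/json"
)

func main() {
	var out map[string]interface{}
	data := []byte(`{"replicas": 3}`)
	if err := sigsjson.UnmarshalCaseSensitivePreserveInts(data, &out); err != nil {
		panic(err)
	}
	// With encoding/json this value would be float64; here whole numbers stay int64.
	fmt.Printf("%T\n", out["replicas"]) // int64
}
```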
// (nil, false) is returned if there is no such PathElement. -func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { +func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go index 75a492d8..f1aa2586 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go @@ -112,7 +112,7 @@ func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts { set.Set().Iterate(func(p fieldpath.Path) { conflicts = append(conflicts, Conflict{ Manager: manager, - Path: p, + Path: p.Copy(), }) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go index 1b23dcbd..d5a977d6 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -18,6 +18,7 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/typed" + "sigs.k8s.io/structured-merge-diff/v4/value" ) // Converter is an interface to the conversion logic. The converter @@ -27,19 +28,39 @@ type Converter interface { IsMissingVersionError(error) bool } -// Updater is the object used to compute updated FieldSets and also -// merge the object on Apply. -type Updater struct { +// UpdateBuilder allows you to create a new Updater by exposing all of +// the options and setting them once. +type UpdaterBuilder struct { Converter Converter IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set - enableUnions bool + // Stop comparing the new object with old object after applying. + // This was initially used to avoid spurious etcd update, but + // since that's vastly inefficient, we've come-up with a better + // way of doing that. Create this flag to stop it. + // Comparing has become more expensive too now that we're not using + // `Compare` but `value.Equals` so this gives an option to avoid it. + ReturnInputOnNoop bool } -// EnableUnionFeature turns on union handling. It is disabled by default until the -// feature is complete. -func (s *Updater) EnableUnionFeature() { - s.enableUnions = true +func (u *UpdaterBuilder) BuildUpdater() *Updater { + return &Updater{ + Converter: u.Converter, + IgnoredFields: u.IgnoredFields, + returnInputOnNoop: u.ReturnInputOnNoop, + } +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + // Deprecated: This will eventually become private. + Converter Converter + + // Deprecated: This will eventually become private. 
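The new UpdaterBuilder above replaces direct construction of Updater, whose exported fields are now marked deprecated. A sketch of the intended construction path, assuming the caller already has a merge.Converter implementation (building one is out of scope here):

```go
package mergeexample

import (
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

// newUpdater wires an Updater through the new builder; the Converter is
// assumed to be supplied by the caller.
func newUpdater(conv merge.Converter) *merge.Updater {
	return (&merge.UpdaterBuilder{
		Converter:         conv,
		IgnoredFields:     map[fieldpath.APIVersion]*fieldpath.Set{}, // nothing ignored
		ReturnInputOnNoop: false,                                     // Apply may return nil when nothing changed
	}).BuildUpdater()
}
```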
+ IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + returnInputOnNoop bool } func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { @@ -126,12 +147,6 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - newObject, err = liveObject.NormalizeUnions(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) if err != nil { return nil, fieldpath.ManagedFields{}, err @@ -145,7 +160,7 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp ignored = fieldpath.NewSet() } managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), version, false, ) @@ -157,30 +172,17 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp // Apply should be called when Apply is run, given the current object as // well as the configuration that is applied. This will merge the object -// and return it. If the object hasn't changed, nil is returned (the -// managers can still have changed though). +// and return it. func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { var err error managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - configObject, err = configObject.NormalizeUnionsApply(configObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } newObject, err := liveObject.Merge(configObject) if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) } - if s.enableUnions { - newObject, err = configObject.NormalizeUnionsApply(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } lastSet := managers[manager] set, err := configObject.ToFieldSet() if err != nil { @@ -200,11 +202,11 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) } - managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + managers, _, err = s.update(liveObject, newObject, version, managers, manager, force) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if compare.IsSame() { + if !s.returnInputOnNoop && value.EqualsUsing(value.NewFreelistAllocator(), liveObject.AsValue(), newObject.AsValue()) { newObject = nil } return newObject, managers, nil @@ -218,7 +220,8 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel if lastSet == nil || lastSet.Set().Empty() { return merged, nil } - convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + version := lastSet.APIVersion() + convertedMerged, err := s.Converter.Convert(merged, version) if err != nil { if 
s.Converter.IsMissingVersionError(err) { return merged, nil @@ -228,7 +231,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) - pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, version, managers, applyingManager) if err != nil { return nil, fmt.Errorf("failed add back owned items: %v", err) } @@ -241,7 +244,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel // addBackOwnedItems adds back any fields, list and map items that were removed by prune, // but other appliers or updaters (or the current applier's new config) claim to own. -func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, prunedVersion fieldpath.APIVersion, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { var err error managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} for _, managerSet := range managedFields { @@ -252,7 +255,6 @@ func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFie } // Add back owned items at pruned version first to avoid conversion failure // caused by pruned fields which are required for conversion. - prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) if managed, ok := managedAtVersion[prunedVersion]; ok { merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) if err != nil { diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 7e5dc758..5d3707a5 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -73,7 +73,7 @@ type Atom struct { } // Scalar (AKA "primitive") represents a type which has a single value which is -// either numeric, string, or boolean. +// either numeric, string, or boolean, or untyped for any of them. // // TODO: split numeric into float/int? Something even more fine-grained? 
type Scalar string @@ -82,6 +82,7 @@ const ( Numeric = Scalar("numeric") String = Scalar("string") Boolean = Scalar("boolean") + Untyped = Scalar("untyped") ) // ElementRelationship is an enum of the different possible relationships diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index 7d64d130..6eb6c36d 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -110,7 +110,7 @@ var SchemaSchemaYAML = `types: scalar: string - name: deduceInvalidDiscriminator type: - scalar: bool + scalar: boolean - name: fields type: list: @@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types: list: elementType: scalar: string + elementRelationship: atomic - name: untyped map: fields: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go new file mode 100644 index 00000000..ed483cbb --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -0,0 +1,460 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). +func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. 
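The Comparison type above is what consumers inspect after diffing two typed values. A hedged sketch of typical usage, assuming the existing TypedValue.Compare entry point and the deduced-type parser from this package; the YAML literals are illustrative only:

```go
package typedexample

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// printDiff compares two deduced-type objects and reports the field sets
// described above.
func printDiff() error {
	lhs, err := typed.DeducedParseableType.FromYAML(`{"a": 1, "b": 2}`)
	if err != nil {
		return err
	}
	rhs, err := typed.DeducedParseableType.FromYAML(`{"a": 1, "b": 3, "c": 4}`)
	if err != nil {
		return err
	}
	cmp, err := lhs.Compare(rhs)
	if err != nil {
		return err
	}
	fmt.Println(cmp.IsSame()) // false: .b was modified and .c was added
	fmt.Print(cmp.String())   // lists the Modified and Added field sets
	return nil
}
```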
+func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} + +type compareWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are comparing + path fieldpath.Path + + // Resulting comparison. + comparison *Comparison + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*compareWalker + + allocator value.Allocator +} + +// compare compares stuff. +func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. + return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf { + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *compareWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + w.comparison.Modified.Insert(w.path) + } +} + +func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) + } + + // All scalars are leaf fields. 
+ w.doLeaf() + + return nil +} + +func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*compareWalker{} + } + var w2 *compareWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &compareWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.comparison = cmp + return w2 +} + +func (w *compareWalker) finishDescent(w2 *compareWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. + return + // Normal use-case: + // We have no duplicates for this PE, compare items one-to-one. 
+ case len(lList) <= 1 && len(rList) <= 1: + lValue := value.Value(nil) + if len(lList) != 0 { + lValue = lList[0] + } + rValue := value.Value(nil) + if len(rList) != 0 { + rValue = rList[0] + } + errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...) + // Duplicates before & after use-case: + // Compare the duplicates lists as if they were atomic, mark modified if they changed. + case len(lList) >= 2 && len(rList) >= 2: + listEqual := func(lList, rList []value.Value) bool { + if len(lList) != len(rList) { + return false + } + for i := range lList { + if !value.Equals(lList[i], rList[i]) { + return false + } + } + return true + } + if !listEqual(lList, rList) { + w.comparison.Modified.Insert(append(w.path, pe)) + } + // Duplicates before & not anymore use-case: + // Rcursively add new non-duplicate items, Remove duplicate marker, + case len(lList) >= 2: + if len(rList) != 0 { + errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...) + } + w.comparison.Removed.Insert(append(w.path, pe)) + // New duplicates use-case: + // Recursively remove old non-duplicate items, add duplicate marker. + case len(rList) >= 2: + if len(lList) != 0 { + errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...) + } + w.comparison.Added.Insert(append(w.path, pe)) + } + } + + return +} + +func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { + var errs ValidationErrors + length := 0 + if list != nil { + length = list.Length() + } + observed := fieldpath.MakePathElementValueMap(length) + pes := make([]fieldpath.PathElement, 0, length) + for i := 0; i < length; i++ { + child := list.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + // Ignore repeated occurences of `pe`. + if _, found := observed.Get(pe); found { + continue + } + observed.Insert(pe, child) + pes = append(pes, pe) + } + return pes, observed, errs +} + +func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors { + w2 := w.prepareDescent(pe, t.ElementType, w.comparison) + w2.lhs = lChild + w2.rhs = rChild + errs := w2.compare(pe.String) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. 
+ emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType, w.comparison) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.compare(pe.String)...) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 19c77334..78fdb0e7 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str return field.Default, nil } -func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // null entries are illegal. 
@@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, return pe, nil } -func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.IsMap(): @@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { - if list.ElementRelationship == schema.Associative { - if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(a, s, list, index, child) - } +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship != schema.Associative { + return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list") + } - // If there's no keys, then we must be a set of primitives. - return setItemToPathElement(list, index, child) + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, child) } - // Use the index as a key for atomic lists. - return fieldpath.PathElement{Index: &index}, nil + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(child) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 91364408..fa227ac4 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -113,11 +113,12 @@ func (w *mergingWalker) doLeaf() { w.rule(w) } -func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { - errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) - errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) - if len(errs) > 0 { - return errs +func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) } // All scalars are leaf fields. @@ -179,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } out := make([]interface{}, 0, outLen) - rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false) errs = append(errs, rhsErrs...) - lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true) errs = append(errs, lhsErrs...) 
+ if len(errs) != 0 { + return errs + } + sharedOrder := make([]*fieldpath.PathElement, 0, rLen) - for i := range rhsOrder { - pe := &rhsOrder[i] + for i := range rhsPEs { + pe := &rhsPEs[i] if _, ok := observedLHS.Get(*pe); ok { sharedOrder = append(sharedOrder, pe) } @@ -198,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err sharedOrder = sharedOrder[1:] } - lLen, rLen = len(lhsOrder), len(rhsOrder) + mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs)) + lLen, rLen = len(lhsPEs), len(rhsPEs) for lI, rI := 0, 0; lI < lLen || rI < rLen; { if lI < lLen && rI < rLen { - pe := lhsOrder[lI] - if pe.Equals(rhsOrder[rI]) { + pe := lhsPEs[lI] + if pe.Equals(rhsPEs[rI]) { // merge LHS & RHS items - lChild, _ := observedLHS.Get(pe) + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -221,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } continue } - if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) { + if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) { // shared item, but not the one we want in this round lI++ continue } } if lI < lLen { - pe := lhsOrder[lI] + pe := lhsPEs[lI] if _, ok := observedRHS.Get(pe); !ok { - // take LHS item - lChild, _ := observedLHS.Get(pe) + // take LHS item using At to make sure we get the right item (observed may not contain the right item). + lChild := lhs.AtUsing(w.allocator, lI) mergeOut, errs := w.mergeListItem(t, pe, lChild, nil) errs = append(errs, errs...) if mergeOut != nil { @@ -239,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } lI++ continue + } else if _, ok := mergedRHS.Get(pe); ok { + // we've already merged it with RHS, we don't want to duplicate it, skip it. + lI++ } } if rI < rLen { // Take the RHS item, merge with matching LHS item if possible - pe := rhsOrder[rI] - lChild, _ := observedLHS.Get(pe) // may be nil + pe := rhsPEs[rI] + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) @@ -271,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } -func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { +func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { var errs ValidationErrors length := 0 if list != nil { @@ -281,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( pes := make([]fieldpath.PathElement, 0, length) for i := 0; i < length; i++ { child := list.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -289,11 +300,15 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( // this element. 
continue } - if _, found := observed.Get(pe); found { + if _, found := observed.Get(pe); found && !allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) continue + } else if !found { + observed.Insert(pe, child) + } else { + // Duplicated items are not merged with the new value, make them nil. + observed.Insert(pe, value.NewValueInterface(nil)) } - observed.Insert(pe, child) pes = append(pes, pe) } return pes, observed, errs diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 3949a78f..4258ee5b 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool { // FromYAML parses a yaml string into an object with the current schema // and the type "typename" or an error if validation fails. -func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { +func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) { var v interface{} err := yaml.Unmarshal([]byte(object), &v) if err != nil { return nil, err } - return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...) } // FromUnstructured converts a go "interface{}" type, typically an @@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { // The provided interface{} must be one of: map[string]interface{}, // map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. -func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { - return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...) } // FromStructured converts a go "interface{}" type, typically an structured object in @@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { // schema validation. The provided "interface{}" value must be a pointer so that the // value can be modified via reflection. The provided "interface{}" may contain structs // and types that are converted to Values by the jsonMarshaler interface. -func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { +func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { v, err := value.NewValueReflect(in) if err != nil { return nil, fmt.Errorf("error creating struct value reflector: %v", err) } - return AsTyped(v, p.Schema, p.TypeRef) + return AsTyped(v, p.Schema, p.TypeRef, opts...) 
} // DeducedParseableType is a ParseableType that deduces the type from diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index a338d761..ad071ee8 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { - i, item := iter.Item() + _, item := iter.Item() // Ignore error because we have already validated this list - pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + pe, _ := listItemToPathElement(w.allocator, w.schema, t, item) path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e. !w.shouldExtract) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go index 047efff0..d563a87e 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + // Keeps track of the PEs we've seen + seen := fieldpath.MakePathElementSet(list.Length()) + // Keeps tracks of the PEs we've counted as duplicates + duplicates := fieldpath.MakePathElementSet(list.Length()) for i := 0; i < list.Length(); i++ { child := list.At(i) - pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if seen.Has(pe) { + if duplicates.Has(pe) { + // do nothing + } else { + v.set.Insert(append(v.path, pe)) + duplicates.Insert(pe) + } + } else { + seen.Insert(pe) + } + } + + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if duplicates.Has(pe) { + continue + } v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.toFieldSet()...) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index d63a97fe..9be90282 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -17,8 +17,6 @@ limitations under the License. package typed import ( - "fmt" - "strings" "sync" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -26,16 +24,24 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/value" ) +// ValidationOptions is the list of all the options available when running the validation. +type ValidationOptions int + +const ( + // AllowDuplicates means that sets and associative lists can have duplicate similar items. + AllowDuplicates ValidationOptions = iota +) + // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have // type 'typeName' in the schema. An error is returned if the v doesn't conform // to the schema. 
-func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { tv := &TypedValue{ value: v, typeRef: typeRef, schema: s, } - if err := tv.Validate(); err != nil { + if err := tv.Validate(opts...); err != nil { return nil, err } return tv, nil @@ -45,6 +51,10 @@ func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedVal // conforms to the schema, for cases where that has already been checked or // where you're going to call a method that validates as a side-effect (like // ToFieldSet). +// +// Deprecated: This function was initially created because validation +// was expensive. Now that this has been solved, objects should always +// be created as validated, using `AsTyped`. func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { tv := &TypedValue{ value: v, @@ -77,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema { } // Validate returns an error with a list of every spec violation. -func (tv TypedValue) Validate() error { +func (tv TypedValue) Validate(opts ...ValidationOptions) error { w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } defer w.finished() if errs := w.validate(nil); len(errs) != 0 { return errs @@ -113,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { return merge(&tv, pso, ruleKeepRHS, nil) } +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + // Compare compares the two objects. See the comments on the `Comparison` // struct for details on the return value. // @@ -120,33 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { - c = &Comparison{ + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ Removed: fieldpath.NewSet(), Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - _, err = merge(&tv, rhs, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } else if !value.Equals(w.rhs, w.lhs) { - // TODO: Equality is not sufficient for this. - // Need to implement equality check on the value type. - c.Modified.Insert(w.path) - } - }, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } - }) - if err != nil { - return nil, err + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() } - return c, nil + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil } // RemoveItems removes each provided list or map item from the value. 
@@ -161,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { return &tv } -// NormalizeUnions takes the new object and normalizes the union: -// - If discriminator changed to non-nil, and a new field has been added -// that doesn't match, an error is returned, -// - If discriminator hasn't changed and two fields or more are set, an -// error is returned, -// - If discriminator changed to non-nil, all other fields but the -// discriminated one will be cleared, -// - Otherwise, If only one field is left, update discriminator to that value. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnions(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - -// NormalizeUnionsApply specifically normalize unions on apply. It -// validates that the applied union is correct (there should be no -// ambiguity there), and clear the fields according to the sent intent. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnionsApply(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - func (tv TypedValue) Empty() *TypedValue { tv.value = value.NewValueInterface(nil) return &tv @@ -273,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) } return out, nil } - -// Comparison is the return value of a TypedValue.Compare() operation. -// -// No field will appear in more than one of the three fieldsets. If all of the -// fieldsets are empty, then the objects must have been equal. -type Comparison struct { - // Removed contains any fields removed by rhs (the right-hand-side - // object in the comparison). - Removed *fieldpath.Set - // Modified contains fields present in both objects but different. - Modified *fieldpath.Set - // Added contains any fields added by rhs. - Added *fieldpath.Set -} - -// IsSame returns true if the comparison returned no changes (the two -// compared objects are similar). -func (c *Comparison) IsSame() bool { - return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() -} - -// String returns a human readable version of the comparison. 
-func (c *Comparison) String() string { - bld := strings.Builder{} - if !c.Modified.Empty() { - bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) - } - if !c.Added.Empty() { - bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) - } - if !c.Removed.Empty() { - bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) - } - return bld.String() -} - -// ExcludeFields fields from the compare recursively removes the fields -// from the entire comparison -func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { - if fields == nil || fields.Empty() { - return c - } - c.Removed = c.Removed.RecursiveDifference(fields) - c.Modified = c.Modified.RecursiveDifference(fields) - c.Added = c.Added.RecursiveDifference(fields) - return c -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go deleted file mode 100644 index 1fa5d88a..00000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package typed - -import ( - "fmt" - "strings" - - "sigs.k8s.io/structured-merge-diff/v4/schema" - "sigs.k8s.io/structured-merge-diff/v4/value" -) - -func normalizeUnions(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - for _, union := range atom.Map.Unions { - if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { - return err - } - } - return nil -} - -func normalizeUnionsApply(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - - for _, union := range atom.Map.Unions { - out := value.NewValueInterface(*w.out) - if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { - return err - } - *w.out = out.Unstructured() - } - return nil -} - -type discriminated string -type field string - -type discriminatedNames struct { - f2d map[field]discriminated - d2f map[discriminated]field -} - -func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { - d2f := map[discriminated]field{} - for key, value := range f2d { - d2f[value] = key - } - return discriminatedNames{ - f2d: f2d, - d2f: d2f, - } -} - -func (dn discriminatedNames) toField(d discriminated) field { - if f, 
ok := dn.d2f[d]; ok { - return f - } - return field(d) -} - -func (dn discriminatedNames) toDiscriminated(f field) discriminated { - if d, ok := dn.f2d[f]; ok { - return d - } - return discriminated(f) -} - -type discriminator struct { - name string -} - -func (d *discriminator) Set(m value.Map, v discriminated) { - if d == nil { - return - } - m.Set(d.name, value.NewValueInterface(string(v))) -} - -func (d *discriminator) Get(m value.Map) discriminated { - if d == nil || m == nil { - return "" - } - val, ok := m.Get(d.name) - if !ok { - return "" - } - if !val.IsString() { - return "" - } - return discriminated(val.AsString()) -} - -type fieldsSet map[field]struct{} - -// newFieldsSet returns a map of the fields that are part of the union and are set -// in the given map. -func newFieldsSet(m value.Map, fields []field) fieldsSet { - if m == nil { - return nil - } - set := fieldsSet{} - for _, f := range fields { - if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { - set.Add(f) - } - } - return set -} - -func (fs fieldsSet) Add(f field) { - if fs == nil { - fs = map[field]struct{}{} - } - fs[f] = struct{}{} -} - -func (fs fieldsSet) One() *field { - for f := range fs { - return &f - } - return nil -} - -func (fs fieldsSet) Has(f field) bool { - _, ok := fs[f] - return ok -} - -func (fs fieldsSet) List() []field { - fields := []field{} - for f := range fs { - fields = append(fields, f) - } - return fields -} - -func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { - n := fieldsSet{} - for f := range fs { - if !o.Has(f) { - n.Add(f) - } - } - return n -} - -func (fs fieldsSet) String() string { - s := []string{} - for k := range fs { - s = append(s, string(k)) - } - return strings.Join(s, ", ") -} - -type union struct { - deduceInvalidDiscriminator bool - d *discriminator - dn discriminatedNames - f []field -} - -func newUnion(su *schema.Union) *union { - u := &union{} - if su.Discriminator != nil { - u.d = &discriminator{name: *su.Discriminator} - } - f2d := map[field]discriminated{} - for _, f := range su.Fields { - u.f = append(u.f, field(f.FieldName)) - f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) - } - u.dn = newDiscriminatedName(f2d) - u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator - return u -} - -// clear removes all the fields in map that are part of the union, but -// the one we decided to keep. -func (u *union) clear(m value.Map, f field) { - for _, fieldName := range u.f { - if field(fieldName) != f { - m.Delete(string(fieldName)) - } - } -} - -func (u *union) Normalize(old, new, out value.Map) error { - os := newFieldsSet(old, u.f) - ns := newFieldsSet(new, u.f) - diff := ns.Difference(os) - - if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { - if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { - return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) - } - if len(diff) > 1 { - return fmt.Errorf("multiple new fields added: %v", diff) - } - u.clear(out, u.dn.toField(u.d.Get(new))) - return nil - } - - if len(ns) > 1 { - return fmt.Errorf("multiple fields set without discriminator change: %v", ns) - } - - // Set discriminiator if it needs to be deduced. 
- if u.deduceInvalidDiscriminator && len(ns) == 1 { - u.d.Set(out, u.dn.toDiscriminated(*ns.One())) - } - - return nil -} - -func (u *union) NormalizeApply(applied, merged, out value.Map) error { - as := newFieldsSet(applied, u.f) - if len(as) > 1 { - return fmt.Errorf("more than one field of union applied: %v", as) - } - if len(as) == 0 { - // None is set, just leave. - return nil - } - // We have exactly one, discriminiator must match if set - if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { - return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) - } - - // Update discriminiator if needed - if u.deduceInvalidDiscriminator { - u.d.Set(out, u.dn.toDiscriminated(*as.One())) - } - // Clear others fields. - u.clear(out, *as.One()) - - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go index 378d3021..652e24c8 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + v.allowDuplicates = false if v.allocator == nil { v.allocator = value.NewFreelistAllocator() } @@ -49,6 +50,9 @@ type validatingObjectWalker struct { value value.Value schema *schema.Schema typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*validatingObjectWalker @@ -102,6 +106,12 @@ func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs Valida if !v.IsBool() { return errorf("%vexpected boolean, got %v", prefix, v) } + case schema.Untyped: + if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() { + return errorf("%vexpected any scalar, got %v", prefix, v) + } + default: + return errorf("%vunexpected scalar type in schema: %v", prefix, *t) } return nil } @@ -123,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) pe.Index = &i } else { var err error - pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -131,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) // this element. return } - if observedKeys.Has(pe) { + if observedKeys.Has(pe) && !v.allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) 
} observedKeys.Insert(pe) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go index dc8b8c72..c38402b9 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go @@ -136,7 +136,7 @@ func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { if !ok { return false } - return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go index d8e20862..c3ae00b1 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go @@ -88,12 +88,12 @@ func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } @@ -168,12 +168,12 @@ func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index a5a467c0..f0d58d42 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -154,7 +154,9 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi if field.Type.Kind() == reflect.Ptr { e = field.Type.Elem() } - buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + if e.Kind() == reflect.Struct { + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + } continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE index 7805d36d..093d6d3e 100644 --- a/vendor/sigs.k8s.io/yaml/LICENSE +++ b/vendor/sigs.k8s.io/yaml/LICENSE @@ -48,3 +48,259 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# The forked go-yaml.v3 library under this project is covered by two +different licenses (MIT and Apache): + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +# The forked go-yaml.v2 library under the project is covered by an +Apache license: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 325b40b0..003a149e 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -2,26 +2,22 @@ approvers: - dims -- lavalamp +- jpbetz - smarterclayton - deads2k - sttts - liggitt -- caesarxuchao reviewers: - dims - thockin -- lavalamp +- jpbetz - smarterclayton - wojtek-t - deads2k - derekwaynecarr -- caesarxuchao - mikedanese - liggitt -- gmarek - sttts -- ncdc - tallclair labels: - sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go index 235b7f2c..0ea28bd0 100644 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -16,53 +16,53 @@ import ( "unicode/utf8" ) -// indirect walks down v allocating pointers as needed, +// indirect walks down 'value' allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. 
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, +func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If 'value' is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() + if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() { + value = value.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e + if value.Kind() == reflect.Interface && !value.IsNil() { + element := value.Elem() + if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) { + value = element continue } } - if v.Kind() != reflect.Ptr { + if value.Kind() != reflect.Ptr { break } - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() { break } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) + if value.IsNil() { + if value.CanSet() { + value.Set(reflect.New(value.Type().Elem())) } else { - v = reflect.New(v.Type().Elem()) + value = reflect.New(value.Type().Elem()) } } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { + if value.Type().NumMethod() > 0 { + if u, ok := value.Interface().(json.Unmarshaler); ok { return u, nil, reflect.Value{} } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + if u, ok := value.Interface().(encoding.TextUnmarshaler); ok { return nil, u, reflect.Value{} } } - v = v.Elem() + value = value.Elem() } - return nil, nil, v + return nil, nil, value } // A field represents a single field found in a struct. @@ -134,8 +134,8 @@ func typeFields(t reflect.Type) []field { next := []field{{typ: t}} // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} + var count map[reflect.Type]int + var nextCount map[reflect.Type]int // Types already visited at an earlier level. visited := map[reflect.Type]bool{} @@ -348,8 +348,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. 
// // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See http://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and @@ -420,10 +421,8 @@ func equalFoldRight(s, t []byte) bool { t = t[size:] } - if len(t) > 0 { - return false - } - return true + + return len(t) <= 0 } // asciiEqualFold is a specialization of bytes.EqualFold for use when diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml new file mode 100644 index 00000000..8da58fbf --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE similarity index 78% rename from vendor/k8s.io/gengo/examples/set-gen/sets/doc.go rename to vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE index b152a0bf..866d74a7 100644 --- a/vendor/k8s.io/gengo/examples/set-gen/sets/doc.go +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE @@ -1,5 +1,4 @@ -/* -Copyright The Kubernetes Authors. +Copyright 2011-2016 Canonical Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,9 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -// Package sets has auto-generated set types. -package sets diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS new file mode 100644 index 00000000..73be0a3a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS @@ -0,0 +1,24 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +- natasha41575 +- knverey +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md new file mode 100644 index 00000000..53f4139d --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md @@ -0,0 +1,143 @@ +# go-yaml fork + +This package is a fork of the go-yaml library and is intended solely for consumption +by kubernetes projects. In this fork, we plan to support only critical changes required for +kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests +should be made in the upstream go-yaml library, and we will reject such changes in this fork +unless we are pulling them from upstream. + +This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 + +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go new file mode 100644 index 00000000..acf71402 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go new file mode 100644 index 00000000..129bc2a9 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go new file mode 100644 index 00000000..a1c2cc52 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go new file mode 100644 index 00000000..0ee738e1 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + 
"fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go new file mode 100644 index 00000000..81d05dfe --- /dev/null +++ 
b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//                          ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
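+// An empty scalar stands in for a missing node (for example a key with no value);
+// it carries a nil value, implicit plain style, and zero-width marks at the given position.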
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go new file mode 100644 index 00000000..7c1f5fac --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
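+// yaml_parser_determine_encoding below checks for these at the start of the stream;
+// when no BOM is present, UTF-8 is assumed.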
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
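+ // (yaml_parser_determine_encoding may consume a BOM, advancing raw_buffer_pos and the stream offset.)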
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
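+ // (Overlong encodings are rejected here: the decoded value must actually require the number of octets used.)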
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
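+ // The decoded rune is then re-encoded as UTF-8 into parser.buffer below, so later stages only ever see UTF-8.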
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go new file mode 100644 index 00000000..4120e0c9 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
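+ // Count the leading digits: unless exactly four digits followed by '-' are present, the time.Parse attempts are skipped.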
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go new file mode 100644 index 00000000..0b9bb603 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
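+// A simple key is a mapping key that is not introduced by the '?' indicator; the scanner records
+// where one may start so that a KEY token can be emitted retroactively once the following ':' is seen.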
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
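+ // When number is not -1 it names a position in the token queue (relative to tokens already
+ // handed out), so the START token is inserted there instead of being appended.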
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go new file mode 100644 index 00000000..4c45e660 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go new file mode 100644 index 00000000..a2dde608 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go new file mode 100644 index 00000000..30813884 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go new file mode 100644 index 00000000..f6a9c8e3 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+ multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go new file mode 100644 index 00000000..8110ce3c --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go index efbc535d..fc10246b 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package yaml import ( @@ -8,56 +24,59 @@ import ( "reflect" "strconv" - "gopkg.in/yaml.v2" + "sigs.k8s.io/yaml/goyaml.v2" ) -// Marshal marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) +// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference) +func Marshal(obj interface{}) ([]byte, error) { + jsonBytes, err := json.Marshal(obj) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) + return nil, fmt.Errorf("error marshaling into JSON: %w", err) } - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil + return JSONToYAML(jsonBytes) } // JSONOpt is a decoding option for decoding from JSON format. type JSONOpt func(*json.Decoder) *json.Decoder -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, false, opts...) +// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj. Options for the +// standard library json.Decoder can be optionally specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer. +// +// Important notes about the Unmarshal logic: +// +// - Decoding is case-insensitive, unlike the rest of Kubernetes API machinery, as this is using the stdlib json library. This might be confusing to users. +// - This decodes any number (although it is an integer) into a float64 if the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}. This means integers above +/- 2^53 will lose precision when round-tripping. Make a JSONOpt that calls d.UseNumber() to avoid this. +// - Duplicate fields, including in-case-sensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See UnmarshalStrict for an alternative. +// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML to JSON conversion process. +// - There are no compatibility guarantees for returned error values. +func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...) } -// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal -// into an object, optionally configuring the behavior of the JSON unmarshal. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions: +// +// - Duplicate fields in an object yield an error. This is according to the YAML specification. 
+// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error. +func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...) } -// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// unmarshal unmarshals the given YAML byte stream into the given interface, // optionally performing the unmarshalling strictly -func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { - vo := reflect.ValueOf(o) - unmarshalFn := yaml.Unmarshal - if strict { - unmarshalFn = yaml.UnmarshalStrict - } - j, err := yamlToJSON(y, &vo, unmarshalFn) +func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error { + jsonTarget := reflect.ValueOf(obj) + + jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn) if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) + return fmt.Errorf("error converting YAML to JSON: %w", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...) if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) + return fmt.Errorf("error unmarshaling JSON: %w", err) } return nil @@ -67,21 +86,26 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error // object, optionally applying decoder options prior to decoding. We are not // using json.Unmarshal directly as we want the chance to pass in non-default // options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) +func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(reader) for _, opt := range opts { d = opt(d) } - if err := d.Decode(&o); err != nil { + if err := d.Decode(&obj); err != nil { return fmt.Errorf("while decoding JSON: %v", err) } return nil } -// JSONToYAML Converts JSON to YAML. +// JSONToYAML converts JSON to YAML. Notable implementation details: +// +// - Duplicate fields, are case-sensitively ignored in an undefined order. +// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, // etc.) when unmarshalling to interface{}, it just picks float64 @@ -93,35 +117,46 @@ func JSONToYAML(j []byte) ([]byte, error) { } // Marshal this object into YAML. - return yaml.Marshal(jsonObj) + yamlBytes, err := yaml.Marshal(jsonObj) + if err != nil { + return nil, err + } + + return yamlBytes, nil } // YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, // passing JSON through this method should be a no-op. // -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. 
If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. +// Some things YAML can do that are not supported by JSON: +// - In YAML you can have binary and null keys in your maps. These are invalid +// in JSON, and therefore int, bool and float keys are converted to strings implicitly. +// - Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// - And more... read the YAML specification for more details. +// +// Notable about the implementation: // -// For strict decoding of YAML, use YAMLToJSONStrict. +// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See YAMLToJSONStrict for an alternative. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. +// - There are no compatibility guarantees for returned error values. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) + return yamlToJSONTarget(y, nil, yaml.Unmarshal) } // YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, // returning an error on any duplicate field names. func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) + return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := unmarshalFn(yamlBytes, &yamlObj) if err != nil { return nil, err } @@ -136,7 +171,11 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, } // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) + jsonBytes, err := json.Marshal(jsonObj) + if err != nil { + return nil, err + } + return jsonBytes, nil } func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { @@ -147,13 +186,13 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in // decoding into the value, we're just checking if the ultimate target is a // string. if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) + jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false) // We have a JSON or Text Umarshaler at this level, so we can't be trying // to decode into a string. 
-		if ju != nil || tu != nil {
+		if jsonUnmarshaler != nil || textUnmarshaler != nil {
 			jsonTarget = nil
 		} else {
-			jsonTarget = &pv
+			jsonTarget = &pointerValue
 		}
 	}
@@ -205,7 +244,7 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
 				keyString = "false"
 			}
 		default:
-			return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+			return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v",
 				reflect.TypeOf(k), k, v)
 		}

diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
index ab3e06a2..94abc171 100644
--- a/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go
@@ -1,7 +1,24 @@
 // This file contains changes that are only compatible with go 1.10 and onwards.

+//go:build go1.10
 // +build go1.10

+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package yaml

 import "encoding/json"
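The sketch below is not part of the diff; it is a minimal, hypothetical usage example of the public API whose doc comments are rewritten above (Unmarshal, UnmarshalStrict, YAMLToJSON, JSONToYAML). It illustrates the behaviors those comments describe: json-tag driven, case-insensitive matching in Unmarshal, and rejection of unknown or duplicate fields in UnmarshalStrict. The Config type and the input documents are invented for illustration only.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// Config is a hypothetical type used only for illustration; sigs.k8s.io/yaml
// routes everything through encoding/json, so fields carry `json` tags.
type Config struct {
	Name     string `json:"name"`
	Replicas int    `json:"replicas"`
}

func main() {
	in := []byte("name: demo\nreplicas: 3\n")

	// Unmarshal converts YAML to JSON and then decodes with encoding/json,
	// so key matching is case-insensitive and unknown fields are ignored.
	var c Config
	if err := yaml.Unmarshal(in, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:demo Replicas:3}

	// UnmarshalStrict additionally rejects duplicate keys and fields that
	// do not exist on the target struct.
	bad := []byte("name: demo\nunknown: true\n")
	if err := yaml.UnmarshalStrict(bad, &c); err != nil {
		fmt.Println("strict decoding failed:", err)
	}

	// YAMLToJSON and JSONToYAML expose the underlying conversions directly;
	// round-tripping a document exercises both.
	j, err := yaml.YAMLToJSON(in)
	if err != nil {
		panic(err)
	}
	y, err := yaml.JSONToYAML(j)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", y) // name: demo\nreplicas: 3\n
}

Note that the import swap from gopkg.in/yaml.v2 to the vendored goyaml.v2 fork shown above is internal to the package; callers keep using the same sigs.k8s.io/yaml entry points.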