diff --git a/.codecov.yml b/.codecov.yml index 4bd982eb802..4441555b65d 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -4,30 +4,38 @@ codecov: require_ci_to_pass: false notify: - wait_for_ci: true + wait_for_ci: false + after_n_builds: 2 + +ignore: + - "_.*" + - "vendor" + - "scripts" + - "contracts" + - "Makefile" coverage: status: project: - default: - informational: true unit-tests: target: auto + threshold: 1 flags: - unit functional-tests: + threshold: 0.1 target: auto flags: - functional patch: default: - informational: true + target: 50 unit-tests: - target: auto + informational: true flags: - unit functional-tests: - target: auto + informational: true flags: - functional @@ -39,7 +47,7 @@ flags: functional-tests: paths: - ".*" - carryforward: true + carryforward: false comment: behavior: default diff --git a/Makefile b/Makefile index 42ce5451816..4149ccba87c 100644 --- a/Makefile +++ b/Makefile @@ -193,6 +193,11 @@ statusgo-cross: statusgo-android statusgo-ios @echo "Full cross compilation done." @ls -ld build/bin/statusgo-* +status-go-deps: + go install go.uber.org/mock/mockgen@v0.4.0 + go install github.com/kevinburke/go-bindata/v4/...@v4.0.2 + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.1 + statusgo-android: generate statusgo-android: ##@cross-compile Build status-go for Android @echo "Building status-go for Android..." @@ -398,6 +403,7 @@ test-e2e: ##@tests Run e2e tests test-e2e-race: export GOTEST_EXTRAFLAGS=-race test-e2e-race: test-e2e ##@tests Run e2e tests with -race flag +test-functional: generate test-functional: export FUNCTIONAL_TESTS_DOCKER_UID ?= $(call sh, id -u) test-functional: export FUNCTIONAL_TESTS_REPORT_CODECOV ?= false test-functional: @@ -407,7 +413,10 @@ canary-test: node-canary # TODO: uncomment that! #_assets/scripts/canary_test_mailservers.sh ./config/cli/fleet-eth.prod.json -lint: generate +lint-panics: generate + go run ./cmd/lint-panics -root="$(call sh, pwd)" -skip=./cmd -test=false ./... + +lint: generate lint-panics golangci-lint run ./... 
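The new `lint-panics` target above wires a custom analyzer (added under `cmd/lint-panics` later in this diff) into the regular `lint` flow. As a minimal sketch of the goroutine shape the linter accepts — assuming `common.LogOnPanic` is the repository's existing recover-and-log helper, since the analyzer shown below only matches the name `LogOnPanic`:

```go
package example

import "github.com/status-im/status-go/common"

func startWorker(work func()) {
	go func() {
		// The deferred recover helper must be the first statement of the
		// goroutine body; otherwise the analyzer reports the `go` statement.
		defer common.LogOnPanic()
		work()
	}()
}
```

Anonymous functions, method values, and named functions launched with `go` are all inspected, as the `ProcessNode` switch in the analyzer later in this diff shows.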
ci: generate lint canary-test test-unit test-e2e ##@tests Run all linters and tests at once diff --git a/_assets/ci/Jenkinsfile b/_assets/ci/Jenkinsfile index ed66572c3e0..ab3fa726d18 100644 --- a/_assets/ci/Jenkinsfile +++ b/_assets/ci/Jenkinsfile @@ -1,5 +1,5 @@ #!/usr/bin/env groovy -library 'status-jenkins-lib@v1.9.6' +library 'status-jenkins-lib@v1.9.12' pipeline { agent { label 'linux' } @@ -52,6 +52,12 @@ pipeline { stage('Linux') { steps { script { linux = jenkins.Build('status-go/platforms/linux') } } } + stage('MacOS') { steps { script { + macos = jenkins.Build('status-go/platforms/macos') + } } } + stage('Windows') { steps { script { + windows = jenkins.Build('status-go/platforms/windows') + } } } stage('Docker') { steps { script { dock = jenkins.Build('status-go/platforms/docker') } } } diff --git a/_assets/ci/Jenkinsfile.android b/_assets/ci/Jenkinsfile.android index ef13cfaffbc..ed80f3822a5 100644 --- a/_assets/ci/Jenkinsfile.android +++ b/_assets/ci/Jenkinsfile.android @@ -85,6 +85,9 @@ pipeline { post { success { script { github.notifyPR(true) } } failure { script { github.notifyPR(false) } } - cleanup { sh 'make deep-clean' } + cleanup { + cleanWs() + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } } // post } // pipeline diff --git a/_assets/ci/Jenkinsfile.desktop b/_assets/ci/Jenkinsfile.desktop new file mode 100644 index 00000000000..c7e460c8468 --- /dev/null +++ b/_assets/ci/Jenkinsfile.desktop @@ -0,0 +1,166 @@ +#!/usr/bin/env groovy +library 'status-jenkins-lib@v1.9.12' + +pipeline { + /* This way we run the same Jenkinsfile on different platforms. */ + agent { label "${params.AGENT_LABEL}" } + + parameters { + string( + name: 'BRANCH', + defaultValue: 'develop', + description: 'Name of branch to build.' + ) + string( + name: 'AGENT_LABEL', + description: 'Label for targeted CI slave host.', + defaultValue: params.AGENT_LABEL ?: getAgentLabel(), + ) + booleanParam( + name: 'RELEASE', + defaultValue: false, + description: 'Enable to create build for release.', + ) + } + + options { + timestamps() + ansiColor('xterm') + /* Prevent Jenkins jobs from running forever */ + timeout(time: 15, unit: 'MINUTES') + disableConcurrentBuilds() + /* manage how many builds we keep */ + buildDiscarder(logRotator( + numToKeepStr: '5', + daysToKeepStr: '30', + artifactNumToKeepStr: '1', + )) + } + + environment { + PLATFORM = getPlatformFromLabel(params.AGENT_LABEL) + TMPDIR = "${WORKSPACE_TMP}" + GOPATH = "${WORKSPACE_TMP}/go" + GOCACHE = "${WORKSPACE_TMP}/gocache" + PATH = "${PATH}:${GOPATH}/bin:/c/Users/jenkins/go/bin" + REPO_SRC = "${GOPATH}/src/github.com/status-im/status-go" + VERSION = sh(script: "./_assets/scripts/version.sh", returnStdout: true) + ARTIFACT = utils.pkgFilename( + name: 'status-go', + type: env.PLATFORM, + version: env.VERSION, + ext: 'zip', + ) + /* prevent sharing cache dir across different jobs */ + GO_GENERATE_FAST_DIR = "${env.WORKSPACE_TMP}/go-generate-fast" + } + + stages { + stage('Setup') { + steps { + script { + if (env.PLATFORM != 'windows') { + sh "mkdir -p \$(dirname ${REPO_SRC})" + sh "ln -s ${WORKSPACE} ${REPO_SRC}" + } + } + } + } + + stage('Deps') { + steps { script { + shell('make status-go-deps') + } + } + } + + stage('Generate') { + steps { script { + shell('make generate') + } + } + } + + stage('Build Static Lib') { + steps { + script { + shell('make statusgo-library') + } + } + } + + stage('Build Shared Lib') { + steps { + script { + shell('make statusgo-shared-library') + } + } + } + + stage('Archive') { + steps { + zip zipFile:
"${ARTIFACT}", archive: true, dir: 'build/bin' + } + } + + stage('Upload') { + steps { + script { + env.PKG_URL = s5cmd.upload(ARTIFACT) + } + } + } + stage('Cleanup') { + steps { + script { + cleanTmp() + } + } + } +} // stages + post { + success { script { github.notifyPR(true) } } + failure { script { github.notifyPR(false) } } + cleanup { cleanWs() } + } // post +} // pipeline + +/* This allows us to use one Jenkinsfile and run + * jobs on different platforms based on job name. */ +def getAgentLabel() { + if (params.AGENT_LABEL) { return params.AGENT_LABEL } + /* We extract the name of the job from currentThread because + * before an agent is picket env is not available. */ + def tokens = Thread.currentThread().getName().split('/') + def labels = [] + /* Check if the job path contains any of the valid labels. */ + ['linux', 'macos', 'windows', 'x86_64', 'aarch64', 'arm64'].each { + if (tokens.contains(it)) { labels.add(it) } + } + return labels.join(' && ') +} + +/* This function extracts the platform from the AGENT_LABEL */ +def getPlatformFromLabel(label) { + for (platform in ['linux', 'macos', 'windows']) { + if (label.contains(platform)) { + return platform + } + } +} + +def shell(cmd) { + if (env.PLATFORM == 'windows') { + sh "${cmd} SHELL=/bin/sh" + } else { + nix.shell(cmd, pure: false) // Use nix.shell for Linux/macOS + } +} + +def cleanTmp() { + if (env.PLATFORM == 'windows') { + sh "rm -rf ${env.WORKSPACE}@tmp" + } else { + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } +} \ No newline at end of file diff --git a/_assets/ci/Jenkinsfile.ios b/_assets/ci/Jenkinsfile.ios index 935b4e12243..90dbc3d48e4 100644 --- a/_assets/ci/Jenkinsfile.ios +++ b/_assets/ci/Jenkinsfile.ios @@ -89,6 +89,9 @@ pipeline { post { success { script { github.notifyPR(true) } } failure { script { github.notifyPR(false) } } - cleanup { sh 'make deep-clean' } + cleanup { + cleanWs() + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } } // post } // pipeline diff --git a/_assets/ci/Jenkinsfile.linux b/_assets/ci/Jenkinsfile.linux deleted file mode 100644 index 1e76096b868..00000000000 --- a/_assets/ci/Jenkinsfile.linux +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env groovy -library 'status-jenkins-lib@v1.9.6' - -pipeline { - agent { label 'linux && x86_64 && nix-2.19' } - - parameters { - string( - name: 'BRANCH', - defaultValue: 'develop', - description: 'Name of branch to build.' - ) - booleanParam( - name: 'RELEASE', - defaultValue: false, - description: 'Enable to create build for release.', - ) - } - - options { - timestamps() - ansiColor('xterm') - /* Prevent Jenkins jobs from running forever */ - timeout(time: 10, unit: 'MINUTES') - disableConcurrentBuilds() - /* manage how many builds we keep */ - buildDiscarder(logRotator( - numToKeepStr: '5', - daysToKeepStr: '30', - artifactNumToKeepStr: '1', - )) - } - - environment { - PLATFORM = 'linux' - TMPDIR = "${WORKSPACE_TMP}" - GOPATH = "${WORKSPACE_TMP}/go" - GOCACHE = "${WORKSPACE_TMP}/gocache" - PATH = "${PATH}:${GOPATH}/bin" - REPO_SRC = "${GOPATH}/src/github.com/status-im/status-go" - VERSION = sh(script: "./_assets/scripts/version.sh", returnStdout: true) - ARTIFACT = utils.pkgFilename( - name: 'status-go', - type: env.PLATFORM, - version: env.VERSION, - ext: 'zip', - ) - /* prevent sharing cache dir across different jobs */ - GO_GENERATE_FAST_DIR = "${env.WORKSPACE_TMP}/go-generate-fast" - } - - stages { - stage('Setup') { - steps { /* Go needs to find status-go in GOPATH. 
*/ - sh "mkdir -p \$(dirname ${REPO_SRC})" - sh "ln -s ${WORKSPACE} ${REPO_SRC}" - } - } - - stage('Generate') { - steps { script { - nix.shell('make generate', pure: false) - } } - } - - /* Sanity-check C bindings */ - stage('Build Static Lib') { - steps { script { - nix.shell('make statusgo-library', pure: false) - } } - } - - stage('Build Shared Lib') { - steps { script { - nix.shell('make statusgo-shared-library', pure: false) - } } - } - - stage('Archive') { - steps { - sh "zip -q -r ${ARTIFACT} build/bin" - archiveArtifacts(ARTIFACT) - } - } - - stage('Upload') { - steps { script { - env.PKG_URL = s5cmd.upload(ARTIFACT) - } } - } - } // stages - post { - success { script { github.notifyPR(true) } } - failure { script { github.notifyPR(false) } } - cleanup { sh 'make deep-clean' } - } // post -} // pipeline diff --git a/_assets/ci/Jenkinsfile.linux b/_assets/ci/Jenkinsfile.linux new file mode 120000 index 00000000000..6047babb30e --- /dev/null +++ b/_assets/ci/Jenkinsfile.linux @@ -0,0 +1 @@ +Jenkinsfile.desktop \ No newline at end of file diff --git a/_assets/ci/Jenkinsfile.macos b/_assets/ci/Jenkinsfile.macos new file mode 120000 index 00000000000..6047babb30e --- /dev/null +++ b/_assets/ci/Jenkinsfile.macos @@ -0,0 +1 @@ +Jenkinsfile.desktop \ No newline at end of file diff --git a/_assets/ci/Jenkinsfile.tests b/_assets/ci/Jenkinsfile.tests index 627f4e71c81..a542284f1ad 100644 --- a/_assets/ci/Jenkinsfile.tests +++ b/_assets/ci/Jenkinsfile.tests @@ -64,7 +64,7 @@ pipeline { environment { PLATFORM = 'tests' DB_CONT = "status-go-test-db-${env.EXECUTOR_NUMBER.toInteger() + 1}" - DB_PORT = "${5432 + env.EXECUTOR_NUMBER.toInteger()}" + DB_PORT = "${54321 + env.EXECUTOR_NUMBER.toInteger()}" TMPDIR = "${WORKSPACE_TMP}" GOPATH = "${WORKSPACE_TMP}/go" GOCACHE = "${WORKSPACE_TMP}/gocache" @@ -238,8 +238,8 @@ pipeline { } } cleanup { - dir(env.TMPDIR) { deleteDir() } - sh "make git-clean" + cleanWs() + dir("${env.WORKSPACE}@tmp") { deleteDir() } } } // post } // pipeline @@ -254,4 +254,4 @@ def getDefaultUnitTestCount() { isNightlyJob() ? '20' : '1' } def getDefaultTimeout() { isNightlyJob() ? 5*60 : 50 } -def getAmountToKeep() { isNightlyJob() ? '14' : isDevelopJob() ? '10' : '5' } \ No newline at end of file +def getAmountToKeep() { isNightlyJob() ? '14' : isDevelopJob() ? 
'10' : '5' } diff --git a/_assets/ci/Jenkinsfile.windows b/_assets/ci/Jenkinsfile.windows new file mode 120000 index 00000000000..6047babb30e --- /dev/null +++ b/_assets/ci/Jenkinsfile.windows @@ -0,0 +1 @@ +Jenkinsfile.desktop \ No newline at end of file diff --git a/_assets/scripts/run_functional_tests.sh b/_assets/scripts/run_functional_tests.sh index 24f2c931025..ec0012c2df2 100755 --- a/_assets/scripts/run_functional_tests.sh +++ b/_assets/scripts/run_functional_tests.sh @@ -24,29 +24,36 @@ mkdir -p "${merged_coverage_reports_path}" mkdir -p "${test_results_path}" all_compose_files="-f ${root_path}/docker-compose.anvil.yml -f ${root_path}/docker-compose.test.status-go.yml" +project_name="status-go-func-tests-$(date +%s)" # Run functional tests echo -e "${GRN}Running tests${RST}, HEAD: $(git rev-parse HEAD)" -docker-compose ${all_compose_files} up -d --build --remove-orphans +docker compose -p ${project_name} ${all_compose_files} up -d --build --remove-orphans echo -e "${GRN}Running tests-rpc${RST}" # Follow the logs, wait for them to finish -docker-compose ${all_compose_files} logs -f tests-rpc > "${root_path}/tests-rpc.log" +docker compose -p ${project_name} ${all_compose_files} logs -f tests-rpc > "${root_path}/tests-rpc.log" # Stop containers echo -e "${GRN}Stopping docker containers${RST}" -docker-compose ${all_compose_files} stop +docker compose -p ${project_name} ${all_compose_files} stop # Save logs echo -e "${GRN}Saving logs${RST}" -docker-compose ${all_compose_files} logs status-go > "${root_path}/statusd.log" -docker-compose ${all_compose_files} logs status-go-no-funds > "${root_path}/statusd-no-funds.log" +docker compose -p ${project_name} ${all_compose_files} logs status-go > "${root_path}/statusd.log" +docker compose -p ${project_name} ${all_compose_files} logs status-backend > "${root_path}/status-backend.log" + +if [ "$(uname)" = "Darwin" ]; then + separator="-" +else + separator="_" +fi # Retrieve exit code -exit_code=$(docker inspect tests-functional_tests-rpc_1 -f '{{.State.ExitCode}}'); +exit_code=$(docker inspect ${project_name}${separator}tests-rpc${separator}1 -f '{{.State.ExitCode}}'); # Cleanup containers echo -e "${GRN}Removing docker containers${RST}" -docker-compose ${all_compose_files} down +docker compose -p ${project_name} ${all_compose_files} down # Collect coverage reports echo -e "${GRN}Collecting code coverage reports${RST}" diff --git a/account/accounts.go b/account/accounts.go index 7cc1cfc0f15..ad16f7dc345 100644 --- a/account/accounts.go +++ b/account/accounts.go @@ -14,11 +14,11 @@ import ( "time" "github.com/google/uuid" + "go.uber.org/zap" gethkeystore "github.com/ethereum/go-ethereum/accounts/keystore" gethcommon "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/account/generator" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/keystore" @@ -100,6 +100,8 @@ type DefaultManager struct { selectedChatAccount *SelectedExtKey // account that was processed during the last call to SelectAccount() mainAccountAddress types.Address watchAddresses []types.Address + + logger *zap.Logger } // GetKeystore is only used in tests @@ -642,13 +644,13 @@ func (m *DefaultManager) ReEncryptKeyStoreDir(keyDirPath, oldPass, newPass strin err = os.RemoveAll(tempKeyDirPath) if err != nil { // the re-encryption is complete so we don't throw - log.Error("unable to delete tempKeyDirPath, manual cleanup required") + 
m.logger.Error("unable to delete tempKeyDirPath, manual cleanup required") } err = os.RemoveAll(backupKeyDirPath) if err != nil { // the re-encryption is complete so we don't throw - log.Error("unable to delete backupKeyDirPath, manual cleanup required") + m.logger.Error("unable to delete backupKeyDirPath, manual cleanup required") } return nil diff --git a/account/accounts_geth.go b/account/accounts_geth.go index a6e3bd17388..d43915d27d9 100644 --- a/account/accounts_geth.go +++ b/account/accounts_geth.go @@ -3,6 +3,8 @@ package account import ( "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/accounts" "github.com/status-im/status-go/account/generator" @@ -17,9 +19,12 @@ type GethManager struct { } // NewGethManager returns new node account manager. -func NewGethManager() *GethManager { +func NewGethManager(logger *zap.Logger) *GethManager { m := &GethManager{} - m.DefaultManager = &DefaultManager{accountsGenerator: generator.New(m)} + m.DefaultManager = &DefaultManager{ + accountsGenerator: generator.New(m), + logger: logger, + } return m } diff --git a/account/accounts_test.go b/account/accounts_test.go index 139205618de..36aa3dff2ee 100644 --- a/account/accounts_test.go +++ b/account/accounts_test.go @@ -11,6 +11,7 @@ import ( "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/keystore" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/protocol/tt" "github.com/status-im/status-go/t/utils" "github.com/stretchr/testify/require" @@ -21,7 +22,7 @@ const testPassword = "test-password" const newTestPassword = "new-test-password" func TestVerifyAccountPassword(t *testing.T) { - accManager := NewGethManager() + accManager := NewGethManager(tt.MustCreateTestLogger()) keyStoreDir := t.TempDir() emptyKeyStoreDir := t.TempDir() @@ -103,7 +104,7 @@ func TestVerifyAccountPasswordWithAccountBeforeEIP55(t *testing.T) { err := utils.ImportTestAccount(keyStoreDir, "test-account3-before-eip55.pk") require.NoError(t, err) - accManager := NewGethManager() + accManager := NewGethManager(tt.MustCreateTestLogger()) address := types.HexToAddress(utils.TestConfig.Account3.WalletAddress) _, err = accManager.VerifyAccountPassword(keyStoreDir, address.Hex(), utils.TestConfig.Account3.Password) @@ -133,7 +134,7 @@ type testAccount struct { // SetupTest is used here for reinitializing the mock before every // test function to avoid faulty execution. 
func (s *ManagerTestSuite) SetupTest() { - s.accManager = NewGethManager() + s.accManager = NewGethManager(tt.MustCreateTestLogger()) keyStoreDir := s.T().TempDir() s.Require().NoError(s.accManager.InitKeystore(keyStoreDir)) diff --git a/api/backend_test.go b/api/backend_test.go index 3a40cee0e40..49f0f05f816 100644 --- a/api/backend_test.go +++ b/api/backend_test.go @@ -32,6 +32,7 @@ import ( "github.com/status-im/status-go/node" "github.com/status-im/status-go/params" "github.com/status-im/status-go/protocol/requests" + "github.com/status-im/status-go/protocol/tt" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/services/typeddata" "github.com/status-im/status-go/services/wallet" @@ -95,7 +96,10 @@ func setupGethStatusBackend() (*GethStatusBackend, func() error, func() error, f if err != nil { return nil, nil, nil, nil, err } - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) + if err != nil { + return nil, nil, nil, nil, err + } backend.StatusNode().SetAppDB(db) ma, stop2, err := setupTestMultiDB() @@ -292,7 +296,8 @@ func TestBackendGettersConcurrently(t *testing.T) { func TestBackendConnectionChangesConcurrently(t *testing.T) { connections := [...]string{connection.Wifi, connection.Cellular, connection.Unknown} - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) + count := 3 var wg sync.WaitGroup @@ -310,7 +315,8 @@ func TestBackendConnectionChangesConcurrently(t *testing.T) { } func TestBackendConnectionChangesToOffline(t *testing.T) { - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) + b.ConnectionChange(connection.None, false) assert.True(t, b.connectionState.Offline) @@ -386,7 +392,7 @@ func TestBackendCallRPCConcurrently(t *testing.T) { } func TestAppStateChange(t *testing.T) { - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) var testCases = []struct { name string @@ -460,7 +466,7 @@ func TestBlockedRPCMethods(t *testing.T) { } func TestCallRPCWithStoppedNode(t *testing.T) { - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) resp, err := backend.CallRPC( `{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":1}`, @@ -699,7 +705,8 @@ func TestBackendGetVerifiedAccount(t *testing.T) { func TestRuntimeLogLevelIsNotWrittenToDatabase(t *testing.T) { utils.Init() - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) + chatKey, err := gethcrypto.GenerateKey() require.NoError(t, err) walletKey, err := gethcrypto.GenerateKey() @@ -767,7 +774,8 @@ func TestRuntimeLogLevelIsNotWrittenToDatabase(t *testing.T) { func TestLoginWithKey(t *testing.T) { utils.Init() - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) + chatKey, err := gethcrypto.GenerateKey() require.NoError(t, err) walletKey, err := gethcrypto.GenerateKey() @@ -825,7 +833,8 @@ func TestLoginAccount(t *testing.T) { tmpdir := t.TempDir() nameserver := "8.8.8.8" - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) + createAccountRequest := &requests.CreateAccount{ DisplayName: "some-display-name", CustomizationColor: "#ffffff", @@ -855,6 +864,7 @@ func TestLoginAccount(t *testing.T) { acc, err := b.CreateAccountAndLogin(createAccountRequest) require.NoError(t, err) require.Equal(t, nameserver, b.config.WakuV2Config.Nameserver) + require.True(t, acc.HasAcceptedTerms) waitForLogin(c) 
require.NoError(t, b.Logout()) @@ -882,7 +892,8 @@ func TestLoginAccount(t *testing.T) { func TestVerifyDatabasePassword(t *testing.T) { utils.Init() - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) + chatKey, err := gethcrypto.GenerateKey() require.NoError(t, err) walletKey, err := gethcrypto.GenerateKey() @@ -920,7 +931,7 @@ func TestVerifyDatabasePassword(t *testing.T) { } func TestDeleteMultiaccount(t *testing.T) { - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) rootDataDir := t.TempDir() @@ -1279,7 +1290,7 @@ func loginDesktopUser(t *testing.T, conf *params.NodeConfig) { username := "TestUser" passwd := "0xC888C9CE9E098D5864D3DED6EBCC140A12142263BACE3A23A36F9905F12BD64A" // #nosec G101 - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) require.NoError(t, b.AccountManager().InitKeystore(conf.KeyStoreDir)) b.UpdateRootDataDir(conf.DataDir) @@ -1328,7 +1339,7 @@ func TestChangeDatabasePassword(t *testing.T) { oldPassword := "password" newPassword := "newPassword" - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) backend.UpdateRootDataDir(t.TempDir()) // Setup keystore to test decryption of it @@ -1385,7 +1396,7 @@ func TestCreateWallet(t *testing.T) { password := "some-password2" // nolint: goconst tmpdir := t.TempDir() - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) defer func() { require.NoError(t, b.StopNode()) }() @@ -1450,7 +1461,7 @@ func TestSetFleet(t *testing.T) { password := "some-password2" // nolint: goconst tmpdir := t.TempDir() - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) createAccountRequest := &requests.CreateAccount{ DisplayName: "some-display-name", CustomizationColor: "#ffffff", @@ -1519,7 +1530,7 @@ func TestWalletConfigOnLoginAccount(t *testing.T) { raribleMainnetAPIKey := "rarible-mainnet-api-key" // nolint: gosec raribleTestnetAPIKey := "rarible-testnet-api-key" // nolint: gosec - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) createAccountRequest := &requests.CreateAccount{ DisplayName: "some-display-name", CustomizationColor: "#ffffff", @@ -1584,7 +1595,7 @@ func TestTestnetEnabledSettingOnCreateAccount(t *testing.T) { utils.Init() tmpdir := t.TempDir() - b := NewGethStatusBackend() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) // Creating an account with test networks enabled createAccountRequest1 := &requests.CreateAccount{ @@ -1630,7 +1641,7 @@ func TestRestoreAccountAndLogin(t *testing.T) { utils.Init() tmpdir := t.TempDir() - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) // Test case 1: Valid restore account request restoreRequest := &requests.RestoreAccount{ @@ -1665,7 +1676,7 @@ func TestRestoreAccountAndLoginWithoutDisplayName(t *testing.T) { utils.Init() tmpdir := t.TempDir() - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) // Test case: Valid restore account request without DisplayName restoreRequest := &requests.RestoreAccount{ @@ -1684,6 +1695,30 @@ func TestRestoreAccountAndLoginWithoutDisplayName(t *testing.T) { require.NotEmpty(t, account.Name) } +func TestAcceptTerms(t *testing.T) { + tmpdir := t.TempDir() + b := NewGethStatusBackend(tt.MustCreateTestLogger()) + conf, err := params.NewNodeConfig(tmpdir, 1777) + require.NoError(t, err) + require.NoError(t, 
b.AccountManager().InitKeystore(conf.KeyStoreDir)) + b.UpdateRootDataDir(conf.DataDir) + require.NoError(t, b.OpenAccounts()) + nameserver := "8.8.8.8" + createAccountRequest := &requests.CreateAccount{ + DisplayName: "some-display-name", + CustomizationColor: "#ffffff", + Password: "some-password", + RootDataDir: tmpdir, + LogFilePath: tmpdir + "/log", + WakuV2Nameserver: &nameserver, + WakuV2Fleet: "status.staging", + } + _, err = b.CreateAccountAndLogin(createAccountRequest) + require.NoError(t, err) + err = b.AcceptTerms() + require.NoError(t, err) +} + func TestCreateAccountPathsValidation(t *testing.T) { tmpdir := t.TempDir() @@ -1825,7 +1860,8 @@ func TestRestoreKeycardAccountAndLogin(t *testing.T) { conf, err := params.NewNodeConfig(tmpdir, 1777) require.NoError(t, err) - backend := NewGethStatusBackend() + backend := NewGethStatusBackend(tt.MustCreateTestLogger()) + require.NoError(t, err) require.NoError(t, backend.AccountManager().InitKeystore(conf.KeyStoreDir)) backend.UpdateRootDataDir(conf.DataDir) diff --git a/api/create_account_and_login_test.go b/api/create_account_and_login_test.go index d241c3a1011..12f46c54c59 100644 --- a/api/create_account_and_login_test.go +++ b/api/create_account_and_login_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/status-im/status-go/protocol/requests" + "github.com/status-im/status-go/protocol/tt" ) func TestCreateAccountAndLogin(t *testing.T) { @@ -43,7 +44,7 @@ func TestCreateAccountAndLogin(t *testing.T) { var request requests.CreateAccount err := json.Unmarshal([]byte(requestJSON), &request) require.NoError(t, err) - statusBackend := NewGethStatusBackend() + statusBackend := NewGethStatusBackend(tt.MustCreateTestLogger()) _, err = statusBackend.CreateAccountAndLogin(&request) require.NoError(t, err) t.Logf("TestCreateAccountAndLogin: create account user1 and login successfully") diff --git a/api/geth_backend.go b/api/geth_backend.go index 1be579c3640..e2660351a9d 100644 --- a/api/geth_backend.go +++ b/api/geth_backend.go @@ -23,7 +23,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ethcrypto "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" signercore "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/status-im/status-go/account" @@ -97,33 +96,40 @@ type GethStatusBackend struct { connectionState connection.State appState appState selectedAccountKeyID string - log log.Logger allowAllRPC bool // used only for tests, disables api method restrictions LocalPairingStateManager *statecontrol.ProcessStateManager centralizedMetrics *centralizedmetrics.MetricService + + logger *zap.Logger } // NewGethStatusBackend create a new GethStatusBackend instance -func NewGethStatusBackend() *GethStatusBackend { - defer log.Info("Status backend initialized", "backend", "geth", "version", params.Version, "commit", params.GitCommit, "IpfsGatewayURL", params.IpfsGatewayURL) - - backend := &GethStatusBackend{} +func NewGethStatusBackend(logger *zap.Logger) *GethStatusBackend { + logger = logger.Named("GethStatusBackend") + backend := &GethStatusBackend{ + logger: logger, + } backend.initialize() + + logger.Info("Status backend initialized", + zap.String("backend geth version", params.Version), + zap.String("commit", params.GitCommit), + zap.String("IpfsGatewayURL", params.IpfsGatewayURL)) + return backend } func (b *GethStatusBackend) initialize() { - accountManager := account.NewGethManager() + accountManager := 
account.NewGethManager(b.logger) transactor := transactions.NewTransactor() personalAPI := personal.NewAPI() - statusNode := node.New(transactor) + statusNode := node.New(transactor, b.logger) b.statusNode = statusNode b.accountManager = accountManager b.transactor = transactor b.personalAPI = personalAPI b.statusNode.SetMultiaccountsDB(b.multiaccountsDB) - b.log = log.New("package", "status-go/api.GethStatusBackend") b.LocalPairingStateManager = new(statecontrol.ProcessStateManager) b.LocalPairingStateManager.SetPairing(false) } @@ -182,12 +188,12 @@ func (b *GethStatusBackend) OpenAccounts() error { } db, err := multiaccounts.InitializeDB(filepath.Join(b.rootDataDir, "accounts.sql")) if err != nil { - b.log.Error("failed to initialize accounts db", "err", err) + b.logger.Error("failed to initialize accounts db", zap.Error(err)) return err } b.multiaccountsDB = db - b.centralizedMetrics = centralizedmetrics.NewDefaultMetricService(b.multiaccountsDB.DB()) + b.centralizedMetrics = centralizedmetrics.NewDefaultMetricService(b.multiaccountsDB.DB(), b.logger) err = b.centralizedMetrics.EnsureStarted() if err != nil { return err @@ -198,7 +204,7 @@ func (b *GethStatusBackend) OpenAccounts() error { err = b.statusNode.StartMediaServerWithoutDB() if err != nil { - b.log.Error("failed to start media server without app db", "err", err) + b.logger.Error("failed to start media server without app db", zap.Error(err)) return err } @@ -238,6 +244,24 @@ func (b *GethStatusBackend) GetAccounts() ([]multiaccounts.Account, error) { return b.multiaccountsDB.GetAccounts() } +func (b *GethStatusBackend) AcceptTerms() error { + b.mu.Lock() + defer b.mu.Unlock() + if b.multiaccountsDB == nil { + return errors.New("accounts db wasn't initialized") + } + + accounts, err := b.multiaccountsDB.GetAccounts() + if err != nil { + return err + } + if len(accounts) == 0 { + return errors.New("accounts is empty") + } + + return b.multiaccountsDB.UpdateHasAcceptedTerms(accounts[0].KeyUID, true) +} + func (b *GethStatusBackend) getAccountByKeyUID(keyUID string) (*multiaccounts.Account, error) { b.mu.Lock() defer b.mu.Unlock() @@ -329,7 +353,7 @@ func (b *GethStatusBackend) DeleteImportedKey(address, password, keyStoreDir str if strings.Contains(fileInfo.Name(), address) { _, err := b.accountManager.VerifyAccountPassword(keyStoreDir, "0x"+address, password) if err != nil { - b.log.Error("failed to verify account", "account", address, "error", err) + b.logger.Error("failed to verify account", zap.String("account", address), zap.Error(err)) return err } @@ -409,7 +433,7 @@ func (b *GethStatusBackend) ensureAppDBOpened(account multiaccounts.Account, pas appdatabase.CurrentAppDBKeyUID = account.KeyUID b.appDB, err = appdatabase.InitializeDB(dbFilePath, password, account.KDFIterations) if err != nil { - b.log.Error("failed to initialize db", "err", err.Error()) + b.logger.Error("failed to initialize db", zap.Error(err)) return err } b.statusNode.SetAppDB(b.appDB) @@ -456,7 +480,7 @@ func (b *GethStatusBackend) ensureWalletDBOpened(account multiaccounts.Account, b.walletDB, err = walletdatabase.InitializeDB(dbWalletPath, password, account.KDFIterations) if err != nil { - b.log.Error("failed to initialize wallet db", "err", err.Error()) + b.logger.Error("failed to initialize wallet db", zap.Error(err)) return err } b.statusNode.SetWalletDB(b.walletDB) @@ -665,7 +689,7 @@ func (b *GethStatusBackend) loginAccount(request *requests.Login) error { err = b.StartNode(b.config) if err != nil { - b.log.Info("failed to start node") + 
b.logger.Info("failed to start node") return errors.Wrap(err, "failed to start node") } @@ -693,7 +717,7 @@ func (b *GethStatusBackend) loginAccount(request *requests.Login) error { err = b.multiaccountsDB.UpdateAccountTimestamp(acc.KeyUID, time.Now().Unix()) if err != nil { - b.log.Error("failed to update account") + b.logger.Error("failed to update account") return errors.Wrap(err, "failed to update account") } @@ -721,9 +745,9 @@ func (b *GethStatusBackend) UpdateNodeConfigFleet(acc multiaccounts.Account, pas fleet := accountSettings.GetFleet() if !params.IsFleetSupported(fleet) { - b.log.Warn("fleet is not supported, overriding with default value", - "fleet", fleet, - "defaultFleet", DefaultFleet) + b.logger.Warn("fleet is not supported, overriding with default value", + zap.String("fleet", fleet), + zap.String("defaultFleet", DefaultFleet)) fleet = DefaultFleet } @@ -788,7 +812,7 @@ func (b *GethStatusBackend) startNodeWithAccount(acc multiaccounts.Account, pass err = b.StartNode(b.config) if err != nil { - b.log.Info("failed to start node") + b.logger.Info("failed to start node") return err } @@ -817,7 +841,7 @@ func (b *GethStatusBackend) startNodeWithAccount(acc multiaccounts.Account, pass err = b.multiaccountsDB.UpdateAccountTimestamp(acc.KeyUID, time.Now().Unix()) if err != nil { - b.log.Info("failed to update account") + b.logger.Info("failed to update account") return err } @@ -941,7 +965,7 @@ func (b *GethStatusBackend) ExportUnencryptedDatabase(acc multiaccounts.Account, err = sqlite.DecryptDB(dbPath, directory, password, acc.KDFIterations) if err != nil { - b.log.Error("failed to initialize db", "err", err) + b.logger.Error("failed to initialize db", zap.Error(err)) return err } return nil @@ -961,7 +985,7 @@ func (b *GethStatusBackend) ImportUnencryptedDatabase(acc multiaccounts.Account, err = sqlite.EncryptDB(databasePath, path, password, acc.KDFIterations, signal.SendReEncryptionStarted, signal.SendReEncryptionFinished) if err != nil { - b.log.Error("failed to initialize db", "err", err) + b.logger.Error("failed to initialize db", zap.Error(err)) return err } return nil @@ -1040,7 +1064,7 @@ func (b *GethStatusBackend) ChangeDatabasePassword(keyUID string, password strin // Revert the password to original err2 := b.changeAppDBPassword(account, noLogout, newPassword, password) if err2 != nil { - log.Error("failed to revert app db password", "err", err2) + b.logger.Error("failed to revert app db password", zap.Error(err2)) } return err @@ -1327,7 +1351,7 @@ func (b *GethStatusBackend) RestoreAccountAndLogin(request *requests.RestoreAcco ) if err != nil { - b.log.Error("start node", err) + b.logger.Error("start node", zap.Error(err)) return nil, err } @@ -1392,7 +1416,7 @@ func (b *GethStatusBackend) RestoreKeycardAccountAndLogin(request *requests.Rest ) if err != nil { - b.log.Error("start node", err) + b.logger.Error("start node", zap.Error(err)) return nil, errors.Wrap(err, "failed to start node") } @@ -1580,6 +1604,14 @@ func (b *GethStatusBackend) buildAccount(request *requests.CreateAccount, input acc.KDFIterations = dbsetup.ReducedKDFIterationsNumber } + count, err := b.multiaccountsDB.GetAccountsCount() + if err != nil { + return nil, err + } + if count == 0 { + acc.HasAcceptedTerms = true + } + if request.ImagePath != "" { imageCropRectangle := request.ImageCropRectangle if imageCropRectangle == nil { @@ -1736,7 +1768,7 @@ func (b *GethStatusBackend) CreateAccountAndLogin(request *requests.CreateAccoun ) if err != nil { - b.log.Error("start node", err) + 
b.logger.Error("start node", zap.Error(err)) return nil, err } @@ -2040,7 +2072,7 @@ func (b *GethStatusBackend) loadNodeConfig(inputNodeCfg *params.NodeConfig) erro if _, err = os.Stat(conf.RootDataDir); os.IsNotExist(err) { if err := os.MkdirAll(conf.RootDataDir, os.ModePerm); err != nil { - b.log.Warn("failed to create data directory", zap.Error(err)) + b.logger.Warn("failed to create data directory", zap.Error(err)) return err } } @@ -2079,8 +2111,8 @@ func (b *GethStatusBackend) startNode(config *params.NodeConfig) (err error) { } }() - b.log.Info("status-go version details", "version", params.Version, "commit", params.GitCommit) - b.log.Debug("starting node with config", "config", config) + b.logger.Info("status-go version details", zap.String("version", params.Version), zap.String("commit", params.GitCommit)) + b.logger.Debug("starting node with config", zap.Stringer("config", config)) // Update config with some defaults. if err := config.UpdateWithDefaults(); err != nil { return err @@ -2089,7 +2121,7 @@ func (b *GethStatusBackend) startNode(config *params.NodeConfig) (err error) { // Updating node config b.config = config - b.log.Debug("updated config with defaults", "config", config) + b.logger.Debug("updated config with defaults", zap.Stringer("config", config)) // Start by validating configuration if err := config.Validate(); err != nil { @@ -2125,10 +2157,10 @@ func (b *GethStatusBackend) startNode(config *params.NodeConfig) (err error) { b.personalAPI.SetRPC(b.statusNode.RPCClient(), rpc.DefaultCallTimeout) if err = b.registerHandlers(); err != nil { - b.log.Error("Handler registration failed", "err", err) + b.logger.Error("Handler registration failed", zap.Error(err)) return } - b.log.Info("Handlers registered") + b.logger.Info("Handlers registered") // Handle a case when a node is stopped and resumed. // If there is no account selected, an error is returned. 
@@ -2325,17 +2357,17 @@ func (b *GethStatusBackend) getVerifiedWalletAccount(address, password string) ( config := b.StatusNode().Config() db, err := accounts.NewDB(b.appDB) if err != nil { - b.log.Error("failed to create new *Database instance", "error", err) + b.logger.Error("failed to create new *Database instance", zap.Error(err)) return nil, err } exists, err := db.AddressExists(types.HexToAddress(address)) if err != nil { - b.log.Error("failed to query db for a given address", "address", address, "error", err) + b.logger.Error("failed to query db for a given address", zap.String("address", address), zap.Error(err)) return nil, err } if !exists { - b.log.Error("failed to get a selected account", "err", transactions.ErrInvalidTxSender) + b.logger.Error("failed to get a selected account", zap.Error(transactions.ErrInvalidTxSender)) return nil, transactions.ErrAccountDoesntExist } @@ -2348,7 +2380,7 @@ func (b *GethStatusBackend) getVerifiedWalletAccount(address, password string) ( } if err != nil { - b.log.Error("failed to verify account", "account", address, "error", err) + b.logger.Error("failed to verify account", zap.String("account", address), zap.Error(err)) return nil, err } @@ -2362,7 +2394,7 @@ func (b *GethStatusBackend) generatePartialAccountKey(db *accounts.Database, add dbPath, err := db.GetPath(types.HexToAddress(address)) path := "m/" + dbPath[strings.LastIndex(dbPath, "/")+1:] if err != nil { - b.log.Error("failed to get path for given account address", "account", address, "error", err) + b.logger.Error("failed to get path for given account address", zap.String("account", address), zap.Error(err)) return nil, err } @@ -2436,7 +2468,7 @@ func (b *GethStatusBackend) ConnectionChange(typ string, expensive bool) { state.Offline = true } - b.log.Info("Network state change", "old", b.connectionState, "new", state) + b.logger.Info("Network state change", zap.Stringer("old", b.connectionState), zap.Stringer("new", state)) if b.connectionState.Offline && !state.Offline { // flush hystrix if we are going again online, since it doesn't behave @@ -2457,14 +2489,14 @@ func (b *GethStatusBackend) AppStateChange(state string) { var messenger *protocol.Messenger s, err := parseAppState(state) if err != nil { - log.Error("AppStateChange failed, ignoring", "error", err) + b.logger.Error("AppStateChange failed, ignoring", zap.Error(err)) return } b.appState = s if b.statusNode == nil { - log.Warn("statusNode nil, not reporting app state change") + b.logger.Warn("statusNode nil, not reporting app state change") return } @@ -2477,7 +2509,7 @@ func (b *GethStatusBackend) AppStateChange(state string) { } if messenger == nil { - log.Warn("messenger nil, not reporting app state change") + b.logger.Warn("messenger nil, not reporting app state change") return } @@ -2511,7 +2543,7 @@ func (b *GethStatusBackend) Logout() error { b.mu.Lock() defer b.mu.Unlock() - b.log.Debug("logging out") + b.logger.Debug("logging out") err := b.cleanupServices() if err != nil { return err @@ -2540,7 +2572,7 @@ func (b *GethStatusBackend) Logout() error { err = b.statusNode.StartMediaServerWithoutDB() if err != nil { - b.log.Error("failed to start media server without app db", "err", err) + b.logger.Error("failed to start media server without app db", zap.Error(err)) return err } return nil diff --git a/api/old_mobile_user_upgrading_from_v1_to_v2_test.go b/api/old_mobile_user_upgrading_from_v1_to_v2_test.go index 58eb52acf99..d42c34c7be3 100644 --- a/api/old_mobile_user_upgrading_from_v1_to_v2_test.go +++ 
b/api/old_mobile_user_upgrading_from_v1_to_v2_test.go @@ -6,6 +6,8 @@ import ( "strings" "testing" + "go.uber.org/zap" + d_common "github.com/status-im/status-go/common" "github.com/status-im/status-go/appdatabase" @@ -47,6 +49,7 @@ const ( type OldMobileUserUpgradingFromV1ToV2Test struct { suite.Suite tmpdir string + logger *zap.Logger } type PostLoginCheckCallback func(b *GethStatusBackend) @@ -55,6 +58,10 @@ func (s *OldMobileUserUpgradingFromV1ToV2Test) SetupTest() { utils.Init() s.tmpdir = s.T().TempDir() copyDir(srcFolder, s.tmpdir, s.T()) + + var err error + s.logger, err = zap.NewDevelopment() + s.Require().NoError(err) } func TestOldMobileUserUpgradingFromV1ToV2(t *testing.T) { @@ -62,7 +69,7 @@ func TestOldMobileUserUpgradingFromV1ToV2(t *testing.T) { } func (s *OldMobileUserUpgradingFromV1ToV2Test) loginMobileUser(check PostLoginCheckCallback) { - b := NewGethStatusBackend() + b := NewGethStatusBackend(s.logger) b.UpdateRootDataDir(s.tmpdir) s.Require().NoError(b.OpenAccounts()) s.Require().NoError(b.Login(oldMobileUserKeyUID, oldMobileUserPasswd)) @@ -141,6 +148,11 @@ func (s *OldMobileUserUpgradingFromV1ToV2Test) TestLoginAndMigrationsStillWorkWi s.Require().True(len(keyKps[0].Accounts) == 1) info, err = generator.LoadAccount(keyKps[0].Accounts[0].Address.Hex(), oldMobileUserPasswd) s.Require().NoError(err) + + // The user should manually accept terms, so we make sure we don't set it + // automatically by mistake. + s.Require().False(info.ToMultiAccount().HasAcceptedTerms) + s.Require().Equal(keyKps[0].KeyUID, info.KeyUID) s.Require().Equal(keyKps[0].Accounts[0].KeyUID, info.KeyUID) info, err = generator.ImportPrivateKey("c3ad0b50652318f845565c13761e5369ce75dcbc2a94616e15b829d4b07410fe") @@ -154,7 +166,7 @@ func (s *OldMobileUserUpgradingFromV1ToV2Test) TestLoginAndMigrationsStillWorkWi // TestAddWalletAccount we should be able to add a wallet account after upgrading from mobile v1 func (s *OldMobileUserUpgradingFromV1ToV2Test) TestAddWalletAccountAfterUpgradingFromMobileV1() { - b := NewGethStatusBackend() + b := NewGethStatusBackend(s.logger) b.UpdateRootDataDir(s.tmpdir) s.Require().NoError(b.OpenAccounts()) s.Require().NoError(b.Login(oldMobileUserKeyUID, oldMobileUserPasswd)) diff --git a/api/test_helpers.go b/api/test_helpers.go index 621a0e33cc1..8e7bcdfaa4c 100644 --- a/api/test_helpers.go +++ b/api/test_helpers.go @@ -11,6 +11,7 @@ import ( "github.com/status-im/status-go/multiaccounts/settings" "github.com/status-im/status-go/params" "github.com/status-im/status-go/protocol/requests" + "github.com/status-im/status-go/protocol/tt" "github.com/stretchr/testify/require" ) @@ -28,7 +29,7 @@ func setupWalletTest(t *testing.T, password string) (backend *GethStatusBackend, return } - backend = NewGethStatusBackend() + backend = NewGethStatusBackend(tt.MustCreateTestLogger()) backend.UpdateRootDataDir(tmpdir) err = backend.AccountManager().InitKeystore(filepath.Join(tmpdir, "keystore")) diff --git a/appdatabase/database.go b/appdatabase/database.go index fbbe66e675e..b84cbde5ded 100644 --- a/appdatabase/database.go +++ b/appdatabase/database.go @@ -6,13 +6,15 @@ import ( "encoding/json" "math/big" + "go.uber.org/zap" + d_common "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" 
"github.com/status-im/status-go/appdatabase/migrations" migrationsprevnodecfg "github.com/status-im/status-go/appdatabase/migrationsprevnodecfg" @@ -94,7 +96,7 @@ func OptimizeMobileWakuV2SettingsForMobileV1(sqlTx *sql.Tx) error { if d_common.IsMobilePlatform() { _, err := sqlTx.Exec(`UPDATE wakuv2_config SET light_client = ?, enable_store_confirmation_for_messages_sent = ?`, true, false) if err != nil { - log.Error("failed to enable light client and disable store confirmation for mobile v1", "err", err.Error()) + logutils.ZapLogger().Error("failed to enable light client and disable store confirmation for mobile v1", zap.Error(err)) return err } } @@ -104,7 +106,7 @@ func OptimizeMobileWakuV2SettingsForMobileV1(sqlTx *sql.Tx) error { func FixMissingKeyUIDForAccounts(sqlTx *sql.Tx) error { rows, err := sqlTx.Query(`SELECT address,pubkey FROM accounts WHERE pubkey IS NOT NULL AND type != '' AND type != 'generated'`) if err != nil { - log.Error("Migrating accounts: failed to query accounts", "err", err.Error()) + logutils.ZapLogger().Error("Migrating accounts: failed to query accounts", zap.Error(err)) return err } defer rows.Close() @@ -113,19 +115,19 @@ func FixMissingKeyUIDForAccounts(sqlTx *sql.Tx) error { var pubkey e_types.HexBytes err = rows.Scan(&address, &pubkey) if err != nil { - log.Error("Migrating accounts: failed to scan records", "err", err.Error()) + logutils.ZapLogger().Error("Migrating accounts: failed to scan records", zap.Error(err)) return err } pk, err := crypto.UnmarshalPubkey(pubkey) if err != nil { - log.Error("Migrating accounts: failed to unmarshal pubkey", "err", err.Error(), "pubkey", string(pubkey)) + logutils.ZapLogger().Error("Migrating accounts: failed to unmarshal pubkey", zap.String("pubkey", string(pubkey)), zap.Error(err)) return err } pkBytes := sha256.Sum256(crypto.FromECDSAPub(pk)) keyUIDHex := hexutil.Encode(pkBytes[:]) _, err = sqlTx.Exec(`UPDATE accounts SET key_uid = ? WHERE address = ?`, keyUIDHex, address) if err != nil { - log.Error("Migrating accounts: failed to update key_uid for imported accounts", "err", err.Error()) + logutils.ZapLogger().Error("Migrating accounts: failed to update key_uid for imported accounts", zap.Error(err)) return err } } @@ -134,23 +136,23 @@ func FixMissingKeyUIDForAccounts(sqlTx *sql.Tx) error { err = sqlTx.QueryRow(`SELECT wallet_root_address FROM settings WHERE synthetic_id='id'`).Scan(&walletRootAddress) if err == sql.ErrNoRows { // we shouldn't reach here, but if we do, it probably happened from the test - log.Warn("Migrating accounts: no wallet_root_address found in settings") + logutils.ZapLogger().Warn("Migrating accounts: no wallet_root_address found in settings") return nil } if err != nil { - log.Error("Migrating accounts: failed to get wallet_root_address", "err", err.Error()) + logutils.ZapLogger().Error("Migrating accounts: failed to get wallet_root_address", zap.Error(err)) return err } _, err = sqlTx.Exec(`UPDATE accounts SET key_uid = ?, derived_from = ? 
WHERE type = '' OR type = 'generated'`, CurrentAppDBKeyUID, walletRootAddress.Hex()) if err != nil { - log.Error("Migrating accounts: failed to update key_uid/derived_from", "err", err.Error()) + logutils.ZapLogger().Error("Migrating accounts: failed to update key_uid/derived_from", zap.Error(err)) return err } // fix the default wallet account color issue https://github.com/status-im/status-mobile/issues/20476 // we don't care the other type of account's color _, err = sqlTx.Exec(`UPDATE accounts SET color = 'blue',emoji='šŸ³' WHERE wallet = 1`) if err != nil { - log.Error("Migrating accounts: failed to update default wallet account's color to blue", "err", err.Error()) + logutils.ZapLogger().Error("Migrating accounts: failed to update default wallet account's color to blue", zap.Error(err)) return err } return nil @@ -192,7 +194,7 @@ func migrateEnsUsernames(sqlTx *sql.Tx) error { rows, err := sqlTx.Query(`SELECT usernames FROM settings`) if err != nil { - log.Error("Migrating ens usernames: failed to query 'settings.usernames'", "err", err.Error()) + logutils.ZapLogger().Error("Migrating ens usernames: failed to query 'settings.usernames'", zap.Error(err)) return err } @@ -240,7 +242,7 @@ func migrateEnsUsernames(sqlTx *sql.Tx) error { _, err = sqlTx.Exec(`INSERT INTO ens_usernames (username, chain_id) VALUES (?, ?)`, username, defaultChainID) if err != nil { - log.Error("Migrating ens usernames: failed to insert username into new database", "ensUsername", username, "err", err.Error()) + logutils.ZapLogger().Error("Migrating ens usernames: failed to insert username into new database", zap.String("ensUsername", username), zap.Error(err)) } } diff --git a/centralizedmetrics/metrics.go b/centralizedmetrics/metrics.go index 81600d44441..ccb394d6159 100644 --- a/centralizedmetrics/metrics.go +++ b/centralizedmetrics/metrics.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" "github.com/status-im/status-go/centralizedmetrics/common" "github.com/status-im/status-go/centralizedmetrics/providers" @@ -35,20 +35,23 @@ type MetricService struct { started bool wg sync.WaitGroup interval time.Duration + + logger *zap.Logger } -func NewDefaultMetricService(db *sql.DB) *MetricService { +func NewDefaultMetricService(db *sql.DB, logger *zap.Logger) *MetricService { repository := NewSQLiteMetricRepository(db) - processor := providers.NewMixpanelMetricProcessor(providers.MixpanelAppID, providers.MixpanelToken, providers.MixpanelBaseURL) - return NewMetricService(repository, processor, defaultPollInterval) + processor := providers.NewMixpanelMetricProcessor(providers.MixpanelAppID, providers.MixpanelToken, providers.MixpanelBaseURL, logger) + return NewMetricService(repository, processor, defaultPollInterval, logger) } -func NewMetricService(repository MetricRepository, processor common.MetricProcessor, interval time.Duration) *MetricService { +func NewMetricService(repository MetricRepository, processor common.MetricProcessor, interval time.Duration, logger *zap.Logger) *MetricService { return &MetricService{ repository: repository, processor: processor, interval: interval, done: make(chan bool), + logger: logger.Named("MetricService"), } } @@ -116,27 +119,27 @@ func (s *MetricService) AddMetric(metric common.Metric) error { } func (s *MetricService) processMetrics() { - log.Info("processing metrics") + s.logger.Info("processing metrics") metrics, err := s.repository.Poll() if err != nil { - log.Warn("error polling metrics", "error", err) + 
s.logger.Warn("error polling metrics", zap.Error(err)) return } - log.Info("polled metrics") + s.logger.Info("polled metrics") if len(metrics) == 0 { return } - log.Info("processing metrics") + s.logger.Info("processing metrics") if err := s.processor.Process(metrics); err != nil { - log.Warn("error processing metrics", "error", err) + s.logger.Warn("error processing metrics", zap.Error(err)) return } - log.Info("deleting metrics") + s.logger.Info("deleting metrics") if err := s.repository.Delete(metrics); err != nil { - log.Warn("error deleting metrics", "error", err) + s.logger.Warn("error deleting metrics", zap.Error(err)) } - log.Info("done metrics") + s.logger.Info("done metrics") } diff --git a/centralizedmetrics/metrics_test.go b/centralizedmetrics/metrics_test.go index 07b2b907abf..7b303ef720d 100644 --- a/centralizedmetrics/metrics_test.go +++ b/centralizedmetrics/metrics_test.go @@ -15,11 +15,15 @@ import ( var testMetric = common.Metric{ID: "user-id", EventName: "test-name", EventValue: map[string]interface{}{"test-name": "test-value"}, Platform: "android", AppVersion: "2.30.0"} +func newMetricService(t *testing.T, repository MetricRepository, processor common.MetricProcessor, interval time.Duration) *MetricService { + return NewMetricService(repository, processor, interval, tt.MustCreateTestLogger()) +} + // TestMetricService covers the main functionalities of MetricService func TestMetricService(t *testing.T) { repository := &TestMetricRepository{} processor := &TestMetricProcessor{} - service := NewMetricService(repository, processor, 1*time.Second) + service := newMetricService(t, repository, processor, 1*time.Second) // Start the service service.Start() @@ -111,7 +115,7 @@ func (p *TestMetricProcessor) Process(metrics []common.Metric) error { func TestAddMetric(t *testing.T) { repository := &TestMetricRepository{} processor := &TestMetricProcessor{} - service := NewMetricService(repository, processor, 1*time.Second) + service := newMetricService(t, repository, processor, 1*time.Second) err := service.AddMetric(testMetric) if err != nil { @@ -132,7 +136,7 @@ func TestAddMetric(t *testing.T) { func TestProcessMetrics(t *testing.T) { repository := &TestMetricRepository{} processor := &TestMetricProcessor{} - service := NewMetricService(repository, processor, 1*time.Second) + service := newMetricService(t, repository, processor, 1*time.Second) // Add metrics directly to repository for polling require.NoError(t, repository.Add(common.Metric{ID: "3", EventValue: map[string]interface{}{"price": 6.28}})) @@ -154,7 +158,7 @@ func TestProcessMetrics(t *testing.T) { func TestStartStop(t *testing.T) { repository := &TestMetricRepository{} processor := &TestMetricProcessor{} - service := NewMetricService(repository, processor, 1*time.Second) + service := newMetricService(t, repository, processor, 1*time.Second) service.Start() require.True(t, service.started) @@ -173,7 +177,7 @@ func TestStartStop(t *testing.T) { func TestServiceWithoutMetrics(t *testing.T) { repository := &TestMetricRepository{} processor := &TestMetricProcessor{} - service := NewMetricService(repository, processor, 1*time.Second) + service := newMetricService(t, repository, processor, 1*time.Second) service.Start() defer service.Stop() @@ -187,7 +191,7 @@ func TestServiceWithoutMetrics(t *testing.T) { func TestServiceEnabled(t *testing.T) { repository := &TestMetricRepository{} processor := &TestMetricProcessor{} - service := NewMetricService(repository, processor, 1*time.Second) + service := newMetricService(t, 
repository, processor, 1*time.Second) err := service.ToggleEnabled(true) require.NoError(t, err) @@ -201,7 +205,7 @@ func TestServiceEnabled(t *testing.T) { func TestServiceEnsureStarted(t *testing.T) { repository := &TestMetricRepository{} processor := &TestMetricProcessor{} - service := NewMetricService(repository, processor, 1*time.Second) + service := newMetricService(t, repository, processor, 1*time.Second) err := service.EnsureStarted() require.NoError(t, err) diff --git a/centralizedmetrics/providers/appsflyer.go b/centralizedmetrics/providers/appsflyer.go index 52bffec8da5..23fb9316bc9 100644 --- a/centralizedmetrics/providers/appsflyer.go +++ b/centralizedmetrics/providers/appsflyer.go @@ -5,10 +5,11 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" "github.com/status-im/status-go/centralizedmetrics/common" ) @@ -23,14 +24,17 @@ type AppsflyerMetricProcessor struct { appID string secret string baseURL string + + logger *zap.Logger } // NewAppsflyerMetricProcessor is a constructor for AppsflyerMetricProcessor -func NewAppsflyerMetricProcessor(appID, secret, baseURL string) *AppsflyerMetricProcessor { +func NewAppsflyerMetricProcessor(appID, secret, baseURL string, logger *zap.Logger) *AppsflyerMetricProcessor { return &AppsflyerMetricProcessor{ appID: appID, secret: secret, baseURL: baseURL, + logger: logger, } } @@ -85,7 +89,8 @@ func (p *AppsflyerMetricProcessor) sendToAppsflyer(metric common.Metric) error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - log.Warn("failed to send metric", "status-code", resp.StatusCode, "body", resp.Body) + body, err := io.ReadAll(resp.Body) + p.logger.Warn("failed to send metric", zap.Int("status-code", resp.StatusCode), zap.String("body", string(body)), zap.Error(err)) return errors.New("failed to send metric to Appsflyer") } diff --git a/centralizedmetrics/providers/appsflyers_test.go b/centralizedmetrics/providers/appsflyers_test.go index 4e11867ec69..b35eb743166 100644 --- a/centralizedmetrics/providers/appsflyers_test.go +++ b/centralizedmetrics/providers/appsflyers_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/status-im/status-go/centralizedmetrics/common" + "github.com/status-im/status-go/protocol/tt" ) func TestAppsflyerMetricProcessor(t *testing.T) { @@ -42,7 +43,7 @@ func TestAppsflyerMetricProcessor(t *testing.T) { defer testServer.Close() // Initialize the AppsflyerMetricProcessor with the test server URL - processor := NewAppsflyerMetricProcessor("testAppID", "testSecret", testServer.URL) + processor := NewAppsflyerMetricProcessor("testAppID", "testSecret", testServer.URL, tt.MustCreateTestLogger()) // Example metrics metrics := []common.Metric{ diff --git a/centralizedmetrics/providers/mixpanel.go b/centralizedmetrics/providers/mixpanel.go index c25283d5341..ed6e211cf0e 100644 --- a/centralizedmetrics/providers/mixpanel.go +++ b/centralizedmetrics/providers/mixpanel.go @@ -8,7 +8,7 @@ import ( "io" "net/http" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" "github.com/status-im/status-go/centralizedmetrics/common" ) @@ -23,14 +23,17 @@ type MixpanelMetricProcessor struct { appID string secret string baseURL string + + logger *zap.Logger } // NewMixpanelMetricProcessor is a constructor for MixpanelMetricProcessor -func NewMixpanelMetricProcessor(appID, secret, baseURL string) *MixpanelMetricProcessor { +func NewMixpanelMetricProcessor(appID, secret, baseURL string, logger *zap.Logger) 
*MixpanelMetricProcessor { return &MixpanelMetricProcessor{ appID: appID, secret: secret, baseURL: baseURL, + logger: logger, } } @@ -71,7 +74,7 @@ func (amp *MixpanelMetricProcessor) sendToMixpanel(metrics []common.Metric) erro return err } - log.Info("sending metrics to", "url", url, "metric", mixPanelMetrics, "secret", amp.GetToken()) + amp.logger.Info("sending metrics to", zap.String("url", url), zap.Any("metric", mixPanelMetrics), zap.String("secret", amp.GetToken())) req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload)) if err != nil { @@ -90,8 +93,7 @@ func (amp *MixpanelMetricProcessor) sendToMixpanel(metrics []common.Metric) erro if resp.StatusCode != http.StatusOK { body, err := io.ReadAll(resp.Body) - fmt.Println(resp.StatusCode, string(body), err) - log.Warn("failed to send metric", "status-code", resp.StatusCode, "body", resp.Body) + amp.logger.Warn("failed to send metric", zap.Int("status-code", resp.StatusCode), zap.String("body", string(body)), zap.Error(err)) return errors.New("failed to send metric to Mixpanel") } diff --git a/centralizedmetrics/providers/mixpanel_test.go b/centralizedmetrics/providers/mixpanel_test.go index 51aca344362..c9aeceb3fd2 100644 --- a/centralizedmetrics/providers/mixpanel_test.go +++ b/centralizedmetrics/providers/mixpanel_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/status-im/status-go/centralizedmetrics/common" + "github.com/status-im/status-go/protocol/tt" ) func TestMixpanelMetricProcessor(t *testing.T) { @@ -55,7 +56,7 @@ func TestMixpanelMetricProcessor(t *testing.T) { defer testServer.Close() // Initialize the MixpanelMetricProcessor with the test server URL - processor := NewMixpanelMetricProcessor("testAppID", "testSecret", testServer.URL) + processor := NewMixpanelMetricProcessor("testAppID", "testSecret", testServer.URL, tt.MustCreateTestLogger()) // Example metrics metrics := []common.Metric{ diff --git a/circuitbreaker/circuit_breaker.go b/circuitbreaker/circuit_breaker.go index 0b6a97e2c6d..d76c1a152de 100644 --- a/circuitbreaker/circuit_breaker.go +++ b/circuitbreaker/circuit_breaker.go @@ -6,8 +6,9 @@ import ( "time" "github.com/afex/hystrix-go/hystrix" + "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) type FallbackFunc func() ([]any, error) @@ -177,7 +178,7 @@ func (cb *CircuitBreaker) Execute(cmd *Command) CommandResult { return nil } if err != nil { - log.Warn("hystrix error", "error", err, "provider", circuitName) + logutils.ZapLogger().Warn("hystrix error", zap.String("provider", circuitName), zap.Error(err)) } return err }, nil) diff --git a/cmd/lint-panics/analyzer/analyzer.go b/cmd/lint-panics/analyzer/analyzer.go new file mode 100644 index 00000000000..d7e9df69316 --- /dev/null +++ b/cmd/lint-panics/analyzer/analyzer.go @@ -0,0 +1,245 @@ +package analyzer + +import ( + "context" + "fmt" + "go/ast" + "os" + + "go.uber.org/zap" + + goparser "go/parser" + gotoken "go/token" + "strings" + + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "github.com/status-im/status-go/cmd/lint-panics/gopls" + "github.com/status-im/status-go/cmd/lint-panics/utils" +) + +const Pattern = "LogOnPanic" + +type Analyzer struct { + logger *zap.Logger + lsp LSP + cfg *Config +} + +type LSP interface { + Definition(context.Context, string, int, int) (string, int, error) +} + +func New(ctx context.Context, logger *zap.Logger) 
(*analysis.Analyzer, error) { + cfg := Config{} + flags, err := cfg.ParseFlags() + if err != nil { + return nil, err + } + + logger.Info("creating analyzer", zap.String("root", cfg.RootDir)) + + goplsClient := gopls.NewGoplsClient(ctx, logger, cfg.RootDir) + processor := newAnalyzer(logger, goplsClient, &cfg) + + analyzer := &analysis.Analyzer{ + Name: "logpanics", + Doc: fmt.Sprintf("reports missing defer call to %s", Pattern), + Flags: flags, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: func(pass *analysis.Pass) (interface{}, error) { + return processor.Run(ctx, pass) + }, + } + + return analyzer, nil +} + +func newAnalyzer(logger *zap.Logger, lsp LSP, cfg *Config) *Analyzer { + return &Analyzer{ + logger: logger.Named("processor"), + lsp: lsp, + cfg: cfg.WithAbsolutePaths(), + } +} + +func (p *Analyzer) Run(ctx context.Context, pass *analysis.Pass) (interface{}, error) { + inspected, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, errors.New("analyzer is not type *inspector.Inspector") + } + + // Create a node filter for goroutines (GoStmt represents a 'go' statement) + nodeFilter := []ast.Node{ + (*ast.GoStmt)(nil), + } + + // Inspect go statements + inspected.Preorder(nodeFilter, func(n ast.Node) { + p.ProcessNode(ctx, pass, n) + }) + + return nil, nil +} + +func (p *Analyzer) ProcessNode(ctx context.Context, pass *analysis.Pass, n ast.Node) { + goStmt, ok := n.(*ast.GoStmt) + if !ok { + panic("unexpected node type") + } + + switch fun := goStmt.Call.Fun.(type) { + case *ast.FuncLit: // anonymous function + pos := pass.Fset.Position(fun.Pos()) + logger := p.logger.With( + utils.ZapURI(pos.Filename, pos.Line), + zap.Int("column", pos.Column), + ) + + logger.Debug("found anonymous goroutine") + if err := p.checkGoroutine(fun.Body); err != nil { + p.logLinterError(pass, fun.Pos(), fun.Pos(), err) + } + + case *ast.SelectorExpr: // method call + pos := pass.Fset.Position(fun.Sel.Pos()) + p.logger.Info("found method call as goroutine", + zap.String("methodName", fun.Sel.Name), + utils.ZapURI(pos.Filename, pos.Line), + zap.Int("column", pos.Column), + ) + + defPos, err := p.checkGoroutineDefinition(ctx, pos, pass) + if err != nil { + p.logLinterError(pass, defPos, fun.Sel.Pos(), err) + } + + case *ast.Ident: // function call + pos := pass.Fset.Position(fun.Pos()) + p.logger.Info("found function call as goroutine", + zap.String("functionName", fun.Name), + utils.ZapURI(pos.Filename, pos.Line), + zap.Int("column", pos.Column), + ) + + defPos, err := p.checkGoroutineDefinition(ctx, pos, pass) + if err != nil { + p.logLinterError(pass, defPos, fun.Pos(), err) + } + + default: + p.logger.Error("unexpected goroutine type", + zap.String("type", fmt.Sprintf("%T", fun)), + ) + } +} + +func (p *Analyzer) parseFile(path string, pass *analysis.Pass) (*ast.File, error) { + logger := p.logger.With(zap.String("path", path)) + + src, err := os.ReadFile(path) + if err != nil { + logger.Error("failed to open file", zap.Error(err)) + return nil, err + } + + file, err := goparser.ParseFile(pass.Fset, path, src, 0) + if err != nil { + logger.Error("failed to parse file", zap.Error(err)) + return nil, err + } + + return file, nil +} + +func (p *Analyzer) checkGoroutine(body *ast.BlockStmt) error { + if body == nil { + p.logger.Warn("missing function body") + return nil + } + if len(body.List) == 0 { + // empty goroutine is weird, but it never panics, so not a linter error + return nil + } + + deferStatement, ok := body.List[0].(*ast.DeferStmt) + if !ok { + return
errors.New("first statement is not defer") + } + + selectorExpr, ok := deferStatement.Call.Fun.(*ast.SelectorExpr) + if !ok { + return errors.New("first statement call is not a selector") + } + + firstLineFunName := selectorExpr.Sel.Name + if firstLineFunName != Pattern { + return errors.Errorf("first statement is not %s", Pattern) + } + + return nil +} + +func (p *Analyzer) getFunctionBody(node ast.Node, lineNumber int, pass *analysis.Pass) (body *ast.BlockStmt, pos gotoken.Pos) { + ast.Inspect(node, func(n ast.Node) bool { + // Check if the node is a function declaration + funcDecl, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + + if pass.Fset.Position(n.Pos()).Line != lineNumber { + return true + } + + body = funcDecl.Body + pos = n.Pos() + return false + }) + + return body, pos + +} + +func (p *Analyzer) checkGoroutineDefinition(ctx context.Context, pos gotoken.Position, pass *analysis.Pass) (gotoken.Pos, error) { + defFilePath, defLineNumber, err := p.lsp.Definition(ctx, pos.Filename, pos.Line, pos.Column) + if err != nil { + p.logger.Error("failed to find function definition", zap.Error(err)) + return 0, err + } + + file, err := p.parseFile(defFilePath, pass) + if err != nil { + p.logger.Error("failed to parse file", zap.Error(err)) + return 0, err + } + + body, defPosition := p.getFunctionBody(file, defLineNumber, pass) + return defPosition, p.checkGoroutine(body) +} + +func (p *Analyzer) logLinterError(pass *analysis.Pass, errPos gotoken.Pos, callPos gotoken.Pos, err error) { + errPosition := pass.Fset.Position(errPos) + callPosition := pass.Fset.Position(callPos) + + if p.skip(errPosition.Filename) || p.skip(callPosition.Filename) { + return + } + + message := fmt.Sprintf("missing %s()", Pattern) + p.logger.Warn(message, + utils.ZapURI(errPosition.Filename, errPosition.Line), + zap.String("details", err.Error())) + + if callPos == errPos { + pass.Reportf(errPos, "missing defer call to %s", Pattern) + } else { + pass.Reportf(callPos, "missing defer call to %s", Pattern) + } +} + +func (p *Analyzer) skip(filepath string) bool { + return p.cfg.SkipDir != "" && strings.HasPrefix(filepath, p.cfg.SkipDir) +} diff --git a/cmd/lint-panics/analyzer/analyzer_test.go b/cmd/lint-panics/analyzer/analyzer_test.go new file mode 100644 index 00000000000..9d856dff4ff --- /dev/null +++ b/cmd/lint-panics/analyzer/analyzer_test.go @@ -0,0 +1,28 @@ +package analyzer + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "golang.org/x/tools/go/analysis/analysistest" + + "github.com/status-im/status-go/cmd/lint-panics/utils" +) + +func TestMethods(t *testing.T) { + t.Parallel() + + logger := utils.BuildLogger(zap.DebugLevel) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + a, err := New(ctx, logger) + require.NoError(t, err) + + analysistest.Run(t, analysistest.TestData(), a, "functions") +} diff --git a/cmd/lint-panics/analyzer/config.go b/cmd/lint-panics/analyzer/config.go new file mode 100644 index 00000000000..79452a01ee6 --- /dev/null +++ b/cmd/lint-panics/analyzer/config.go @@ -0,0 +1,60 @@ +package analyzer + +import ( + "flag" + "io" + "os" + "path" + "strings" +) + +type Config struct { + RootDir string + SkipDir string +} + +var workdir string + +func init() { + var err error + workdir, err = os.Getwd() + if err != nil { + panic(err) + } +} + +func (c *Config) ParseFlags() (flag.FlagSet, error) { + flags := flag.NewFlagSet("lint-panics", flag.ContinueOnError) + 
flags.SetOutput(io.Discard) // Otherwise errors are printed to stderr + flags.StringVar(&c.RootDir, "root", workdir, "root directory to run gopls") + flags.StringVar(&c.SkipDir, "skip", "", "skip paths with this prefix") + + // We parse the flags here so that `rootDir` is available before the call to `singlechecker.Main(analyzer)` + // For the same reason we discard the output and ignore the undefined-flag and help-requested errors. + err := flags.Parse(os.Args[1:]) + if err == nil { + return *flags, nil + } + + if strings.Contains(err.Error(), "flag provided but not defined") { + err = nil + } else if strings.Contains(err.Error(), "help requested") { + err = nil + } + + return *flags, err +} + +func (c *Config) WithAbsolutePaths() *Config { + out := *c + + if !path.IsAbs(out.RootDir) { + out.RootDir = path.Join(workdir, out.RootDir) + } + + if out.SkipDir != "" && !path.IsAbs(out.SkipDir) { + out.SkipDir = path.Join(out.RootDir, out.SkipDir) + } + + return &out +} diff --git a/cmd/lint-panics/analyzer/testdata/src/common/common.go b/cmd/lint-panics/analyzer/testdata/src/common/common.go new file mode 100644 index 00000000000..41fd4949ef2 --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/common/common.go @@ -0,0 +1,5 @@ +package common + +func LogOnPanic() { + // do nothing +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/anonymous.go b/cmd/lint-panics/analyzer/testdata/src/functions/anonymous.go new file mode 100644 index 00000000000..e4249b1eb62 --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/functions/anonymous.go @@ -0,0 +1,24 @@ +package functions + +import ( + "common" + "fmt" +) + +func init() { + go func() { + defer common.LogOnPanic() + }() + + go func() { + + }() + + go func() { // want "missing defer call to LogOnPanic" + fmt.Println("anon") + }() + + go func() { // want "missing defer call to LogOnPanic" + common.LogOnPanic() + }() +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/free.go b/cmd/lint-panics/analyzer/testdata/src/functions/free.go new file mode 100644 index 00000000000..26a12c3ca38 --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/functions/free.go @@ -0,0 +1,29 @@ +package functions + +import ( + "common" + "fmt" +) + +func init() { + go ok() + go empty() + go noLogOnPanic() // want "missing defer call to LogOnPanic" + go notDefer() // want "missing defer call to LogOnPanic" +} + +func ok() { + defer common.LogOnPanic() +} + +func empty() { + +} + +func noLogOnPanic() { + defer fmt.Println("Bar") +} + +func notDefer() { + common.LogOnPanic() +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/methods.go b/cmd/lint-panics/analyzer/testdata/src/functions/methods.go new file mode 100644 index 00000000000..13e5383231e --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/functions/methods.go @@ -0,0 +1,33 @@ +package functions + +import ( + "common" + "fmt" +) + +type Test struct { +} + +func init() { + t := Test{} + go t.ok() + go t.empty() + go t.noLogOnPanic() // want "missing defer call to LogOnPanic" + go t.notDefer() // want "missing defer call to LogOnPanic" +} + +func (p *Test) ok() { + defer common.LogOnPanic() +} + +func (p *Test) empty() { + +} + +func (p *Test) noLogOnPanic() { + defer fmt.Println("FooNoLogOnPanic") +} + +func (p *Test) notDefer() { + common.LogOnPanic() +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/pointers.go b/cmd/lint-panics/analyzer/testdata/src/functions/pointers.go new file mode 100644 index 00000000000..d2985395d7d --- /dev/null +++
b/cmd/lint-panics/analyzer/testdata/src/functions/pointers.go @@ -0,0 +1,21 @@ +package functions + +import ( + "common" +) + +func init() { + runAsync(ok) + runAsyncOk(ok) +} + +func runAsync(fn func()) { + go fn() // want "missing defer call to LogOnPanic" +} + +func runAsyncOk(fn func()) { + go func() { + defer common.LogOnPanic() + fn() + }() +} diff --git a/cmd/lint-panics/gopls/dummy_client.go b/cmd/lint-panics/gopls/dummy_client.go new file mode 100644 index 00000000000..1ce026ca453 --- /dev/null +++ b/cmd/lint-panics/gopls/dummy_client.go @@ -0,0 +1,81 @@ +package gopls + +import ( + "context" + + "go.lsp.dev/protocol" + + "go.uber.org/zap" +) + +type DummyClient struct { + logger *zap.Logger +} + +func NewDummyClient(logger *zap.Logger) *DummyClient { + if logger == nil { + logger = zap.NewNop() + } + return &DummyClient{ + logger: logger, + } +} + +func (d *DummyClient) Progress(ctx context.Context, params *protocol.ProgressParams) (err error) { + d.logger.Debug("client: Progress", zap.Any("params", params)) + return +} +func (d *DummyClient) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) (err error) { + d.logger.Debug("client: WorkDoneProgressCreate") + return nil +} + +func (d *DummyClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) (err error) { + d.logger.Debug("client: LogMessage", zap.Any("message", params)) + return nil +} + +func (d *DummyClient) PublishDiagnostics(ctx context.Context, params *protocol.PublishDiagnosticsParams) (err error) { + d.logger.Debug("client: PublishDiagnostics") + return nil +} + +func (d *DummyClient) ShowMessage(ctx context.Context, params *protocol.ShowMessageParams) (err error) { + d.logger.Debug("client: ShowMessage", zap.Any("message", params)) + return nil +} + +func (d *DummyClient) ShowMessageRequest(ctx context.Context, params *protocol.ShowMessageRequestParams) (result *protocol.MessageActionItem, err error) { + d.logger.Debug("client: ShowMessageRequest", zap.Any("message", params)) + return nil, nil +} + +func (d *DummyClient) Telemetry(ctx context.Context, params interface{}) (err error) { + d.logger.Debug("client: Telemetry") + return nil +} + +func (d *DummyClient) RegisterCapability(ctx context.Context, params *protocol.RegistrationParams) (err error) { + d.logger.Debug("client: RegisterCapability") + return nil +} + +func (d *DummyClient) UnregisterCapability(ctx context.Context, params *protocol.UnregistrationParams) (err error) { + d.logger.Debug("client: UnregisterCapability") + return nil +} + +func (d *DummyClient) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (result bool, err error) { + d.logger.Debug("client: ApplyEdit") + return false, nil +} + +func (d *DummyClient) Configuration(ctx context.Context, params *protocol.ConfigurationParams) (result []interface{}, err error) { + d.logger.Debug("client: Configuration") + return nil, nil +} + +func (d *DummyClient) WorkspaceFolders(ctx context.Context) (result []protocol.WorkspaceFolder, err error) { + d.logger.Debug("client: WorkspaceFolders") + return nil, nil +} diff --git a/cmd/lint-panics/gopls/gopls.go b/cmd/lint-panics/gopls/gopls.go new file mode 100644 index 00000000000..ac8e4750d25 --- /dev/null +++ b/cmd/lint-panics/gopls/gopls.go @@ -0,0 +1,155 @@ +package gopls + +import ( + "os/exec" + + "github.com/pkg/errors" + + "context" + + "go.lsp.dev/jsonrpc2" + "go.lsp.dev/protocol" + + "time" + + "go.lsp.dev/uri" + "go.uber.org/zap" +) + +type Connection struct { + logger 
*zap.Logger + server protocol.Server + cmd *exec.Cmd + conn jsonrpc2.Conn +} + +func NewGoplsClient(ctx context.Context, logger *zap.Logger, rootDir string) *Connection { + var err error + + logger.Debug("initializing gopls client") + + gopls := &Connection{ + logger: logger, + } + + client := NewDummyClient(logger) + + // Step 1: Create a JSON-RPC connection using stdin and stdout + gopls.cmd = exec.Command("gopls", "serve") + + stdin, err := gopls.cmd.StdinPipe() + if err != nil { + logger.Error("Failed to get stdin pipe", zap.Error(err)) + panic(err) + } + + stdout, err := gopls.cmd.StdoutPipe() + if err != nil { + logger.Error("Failed to get stdout pipe", zap.Error(err)) + panic(err) + } + + err = gopls.cmd.Start() + if err != nil { + logger.Error("Failed to start gopls", zap.Error(err)) + panic(err) + } + + stream := jsonrpc2.NewStream(&IOStream{ + stdin: stdin, + stdout: stdout, + }) + + // Step 2: Create a client for the running gopls server + ctx, gopls.conn, gopls.server = protocol.NewClient(ctx, client, stream, logger) + + // Step 3: Initialize the gopls server + initParams := protocol.InitializeParams{ + RootURI: uri.From("file", "", rootDir, "", ""), + InitializationOptions: map[string]interface{}{ + "symbolMatcher": "FastFuzzy", + }, + } + + _, err = gopls.server.Initialize(ctx, &initParams) + if err != nil { + logger.Error("Error during initialize", zap.Error(err)) + panic(err) + } + + // Step 4: Send 'initialized' notification + err = gopls.server.Initialized(ctx, &protocol.InitializedParams{}) + if err != nil { + logger.Error("Error during initialized", zap.Error(err)) + panic(err) + } + + return gopls +} + +func (gopls *Connection) Definition(ctx context.Context, filePath string, lineNumber int, charPosition int) (string, int, error) { + // NOTE: gopls uses 0-based line and column numbers + defFile, defLine, err := gopls.definition(ctx, filePath, lineNumber-1, charPosition-1) + return defFile, defLine + 1, err +} + +func (gopls *Connection) definition(ctx context.Context, filePath string, lineNumber int, charPosition int) (string, int, error) { + // Define the file URI and position where the function/method is invoked + fileURI := protocol.DocumentURI("file://" + filePath) // Replace with actual file URI + line := lineNumber // Line number where the function is called + character := charPosition // Character (column) where the function is called + + // Send the definition request + params := &protocol.DefinitionParams{ + TextDocumentPositionParams: protocol.TextDocumentPositionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: fileURI, + }, + Position: protocol.Position{ + Line: uint32(line), + Character: uint32(character), + }, + }, + } + + // Create context with a timeout to avoid hanging + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + locations, err := gopls.server.Definition(ctx, params) + if err != nil { + return "", 0, errors.Wrap(err, "failed to fetch definition") + } + + if len(locations) == 0 { + return "", 0, errors.New("no definition found") + } + + location := locations[0] + return location.URI.Filename(), int(location.Range.Start.Line), nil +} + +func (gopls *Connection) DidOpen(ctx context.Context, path string, content string, logger *zap.Logger) { + err := gopls.server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{ + TextDocument: protocol.TextDocumentItem{ + URI: protocol.DocumentURI(path), + LanguageID: "go", + Version: 1, + Text: content, + }, + }) + if err != nil { + logger.Error("failed to call DidOpen", 
zap.Error(err)) + } +} + +func (gopls *Connection) DidClose(ctx context.Context, path string, logger *zap.Logger) { + err := gopls.server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.DocumentURI(path), + }, + }) + if err != nil { + logger.Error("failed to call DidClose", zap.Error(err)) + } +} diff --git a/cmd/lint-panics/gopls/stream.go b/cmd/lint-panics/gopls/stream.go new file mode 100644 index 00000000000..4792d3f26b7 --- /dev/null +++ b/cmd/lint-panics/gopls/stream.go @@ -0,0 +1,29 @@ +package gopls + +import "io" + +// IOStream combines stdin and stdout into one interface. +type IOStream struct { + stdin io.WriteCloser + stdout io.ReadCloser +} + +// Write writes data to stdin. +func (c *IOStream) Write(p []byte) (n int, err error) { + return c.stdin.Write(p) +} + +// Read reads data from stdout. +func (c *IOStream) Read(p []byte) (n int, err error) { + return c.stdout.Read(p) +} + +// Close closes both stdin and stdout. +func (c *IOStream) Close() error { + err1 := c.stdin.Close() + err2 := c.stdout.Close() + if err1 != nil { + return err1 + } + return err2 +} diff --git a/cmd/lint-panics/main.go b/cmd/lint-panics/main.go new file mode 100644 index 00000000000..d046f0d8954 --- /dev/null +++ b/cmd/lint-panics/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + "golang.org/x/tools/go/analysis/singlechecker" + + "github.com/status-im/status-go/cmd/lint-panics/analyzer" + "github.com/status-im/status-go/cmd/lint-panics/utils" +) + +/* + Run with `-root=` to specify the root directory in which to run gopls. Defaults to the current working directory. + Set `-skip=` to skip errors in certain directories. If relative, it is relative to the root directory. + + If provided, `-root` and `-skip` arguments MUST go first, before any other args.
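+
+	Example invocation (illustrative; these are the flags registered in Config.ParseFlags, in the same shape as the Makefile's lint-panics target):
+
+	  go run ./cmd/lint-panics -root="$(pwd)" -skip=./cmd -test=false ./...
+
+	A goroutine passes the check only when its body begins with the deferred recovery call, e.g. (doWork is a hypothetical workload):
+
+	  go func() {
+	      defer common.LogOnPanic() // must be the first statement of the goroutine
+	      doWork()
+	  }()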
+*/ + +func main() { + logger := utils.BuildLogger(zap.ErrorLevel) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + a, err := analyzer.New(ctx, logger) + if err != nil { + logger.Error("failed to create analyzer", zap.Error(err)) + os.Exit(1) + } + + singlechecker.Main(a) +} diff --git a/cmd/lint-panics/utils/utils.go b/cmd/lint-panics/utils/utils.go new file mode 100644 index 00000000000..e0f69588a29 --- /dev/null +++ b/cmd/lint-panics/utils/utils.go @@ -0,0 +1,39 @@ +package utils + +import ( + "strconv" + + "fmt" + "os" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func URI(path string, line int) string { + return path + ":" + strconv.Itoa(line) +} + +func ZapURI(path string, line int) zap.Field { + return zap.Field{ + Type: zapcore.StringType, + Key: "uri", + String: URI(path, line), + } +} + +func BuildLogger(level zapcore.Level) *zap.Logger { + // Initialize logger with colors + loggerConfig := zap.NewDevelopmentConfig() + loggerConfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + loggerConfig.Level = zap.NewAtomicLevelAt(level) + loggerConfig.Development = false + loggerConfig.DisableStacktrace = true + logger, err := loggerConfig.Build() + if err != nil { + fmt.Printf("failed to initialize logger: %s", err.Error()) + os.Exit(1) + } + + return logger.Named("main") +} diff --git a/cmd/node-canary/main.go b/cmd/node-canary/main.go index 42ffbf5cf32..12c15ee2827 100644 --- a/cmd/node-canary/main.go +++ b/cmd/node-canary/main.go @@ -156,7 +156,7 @@ func startClientNode() (*api.GethStatusBackend, error) { if err != nil { return nil, err } - clientBackend := api.NewGethStatusBackend() + clientBackend := api.NewGethStatusBackend(logutils.ZapLogger()) err = clientBackend.AccountManager().InitKeystore(config.KeyStoreDir) if err != nil { return nil, err diff --git a/cmd/ping-community/main.go b/cmd/ping-community/main.go index 74adeddafc2..c2754aa6203 100644 --- a/cmd/ping-community/main.go +++ b/cmd/ping-community/main.go @@ -123,7 +123,7 @@ func main() { return } - backend := api.NewGethStatusBackend() + backend := api.NewGethStatusBackend(logutils.ZapLogger()) err = ImportAccount(*seedPhrase, backend) if err != nil { logger.Error("failed import account", "err", err) diff --git a/cmd/populate-db/main.go b/cmd/populate-db/main.go index 95afce0f439..1d55a8db379 100644 --- a/cmd/populate-db/main.go +++ b/cmd/populate-db/main.go @@ -132,7 +132,7 @@ func main() { return } - backend := api.NewGethStatusBackend() + backend := api.NewGethStatusBackend(logutils.ZapLogger()) err = ImportAccount(*seedPhrase, backend) if err != nil { logger.Error("failed import account", "err", err) diff --git a/cmd/spiff-workflow/main.go b/cmd/spiff-workflow/main.go index 76cff2c9d3d..80504082734 100644 --- a/cmd/spiff-workflow/main.go +++ b/cmd/spiff-workflow/main.go @@ -127,7 +127,7 @@ func main() { profiling.NewProfiler(*pprofPort).Go() } - backend := api.NewGethStatusBackend() + backend := api.NewGethStatusBackend(logutils.ZapLogger()) err = ImportAccount(*seedPhrase, backend) if err != nil { logger.Error("failed import account", "err", err) diff --git a/cmd/status-cli/util.go b/cmd/status-cli/util.go index 754f49cfe6e..04a6ac514fc 100644 --- a/cmd/status-cli/util.go +++ b/cmd/status-cli/util.go @@ -55,7 +55,7 @@ func start(p StartParams, logger *zap.SugaredLogger) (*StatusCLI, error) { setupLogger(p.Name) logger.Info("starting messenger") - backend := api.NewGethStatusBackend() + backend := api.NewGethStatusBackend(logutils.ZapLogger()) if 
p.KeyUID != "" { if err := getAccountAndLogin(backend, p.Name, rootDataDir, password, p.KeyUID); err != nil { return nil, err @@ -81,7 +81,7 @@ func start(p StartParams, logger *zap.SugaredLogger) (*StatusCLI, error) { } waku := backend.StatusNode().WakuV2Service() telemetryClient := telemetry.NewClient(telemetryLogger, p.TelemetryURL, backend.SelectedAccountKeyID(), p.Name, "cli", telemetry.WithPeerID(waku.PeerID().String())) - go telemetryClient.Start(context.Background()) + telemetryClient.Start(context.Background()) backend.StatusNode().WakuV2Service().SetStatusTelemetryClient(telemetryClient) } wakuAPI := wakuv2ext.NewPublicAPI(wakuService) diff --git a/cmd/statusd/main.go b/cmd/statusd/main.go index bf95b86bbfb..8c02792b885 100644 --- a/cmd/statusd/main.go +++ b/cmd/statusd/main.go @@ -187,7 +187,7 @@ func main() { }() } - backend := api.NewGethStatusBackend() + backend := api.NewGethStatusBackend(logutils.ZapLogger()) if config.NodeKey == "" { logger.Error("node key needs to be set if running a push notification server") return diff --git a/common/dbsetup/db_setup.go b/common/dbsetup/db_setup.go index 79c7c5296bf..21287dc706e 100644 --- a/common/dbsetup/db_setup.go +++ b/common/dbsetup/db_setup.go @@ -4,7 +4,7 @@ import ( "database/sql" "errors" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) const InMemoryPath = ":memory:" @@ -22,8 +22,7 @@ type DatabaseInitializer interface { // GetDBFilename takes an instance of sql.DB and returns the filename of the "main" database func GetDBFilename(db *sql.DB) (string, error) { if db == nil { - logger := log.New() - logger.Warn("GetDBFilename was passed a nil pointer sql.DB") + logutils.ZapLogger().Warn("GetDBFilename was passed a nil pointer sql.DB") return "", nil } diff --git a/common/utils.go b/common/utils.go index 09cfd970414..82936fab833 100644 --- a/common/utils.go +++ b/common/utils.go @@ -5,11 +5,12 @@ import ( "errors" "reflect" "regexp" - "runtime/debug" "strings" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/status-im/status-go/eth-node/crypto" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/protocol/identity/alias" "github.com/status-im/status-go/protocol/protobuf" ) @@ -89,7 +90,7 @@ func IsNil(i interface{}) bool { func LogOnPanic() { if err := recover(); err != nil { - log.Error("panic in goroutine", "error", err, "stacktrace", string(debug.Stack())) + logutils.ZapLogger().Error("panic in goroutine", zap.Any("error", err), zap.Stack("stacktrace")) panic(err) } } diff --git a/db/db.go b/db/db.go index 498cda3da16..c6ecd93743f 100644 --- a/db/db.go +++ b/db/db.go @@ -9,8 +9,9 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/util" + "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) type storagePrefix byte @@ -84,7 +85,7 @@ func Create(path, dbName string) (*leveldb.DB, error) { func Open(path string, opts *opt.Options) (db *leveldb.DB, err error) { db, err = leveldb.OpenFile(path, opts) if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted { - log.Info("database is corrupted trying to recover", "path", path) + logutils.ZapLogger().Info("database is corrupted trying to recover", zap.String("path", path)) db, err = leveldb.RecoverFile(path, nil) } return diff --git a/discovery/discv5.go b/discovery/discv5.go index 882b0fe4fd3..0d71b7d7a3d 100644 --- a/discovery/discv5.go +++ 
b/discovery/discv5.go @@ -6,8 +6,10 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/p2p/discv5" + "github.com/status-im/status-go/logutils" ) // NewDiscV5 creates instances of discovery v5 facade. @@ -40,7 +42,7 @@ func (d *DiscV5) Running() bool { func (d *DiscV5) Start() error { d.mu.Lock() defer d.mu.Unlock() - log.Debug("Starting discovery", "listen address", d.laddr) + logutils.ZapLogger().Debug("Starting discovery", zap.String("listen address", d.laddr)) addr, err := net.ResolveUDPAddr("udp", d.laddr) if err != nil { return err diff --git a/go.mod b/go.mod index b3abfac22bc..b04f014ec00 100644 --- a/go.mod +++ b/go.mod @@ -99,6 +99,9 @@ require ( github.com/wk8/go-ordered-map/v2 v2.1.7 github.com/yeqown/go-qrcode/v2 v2.2.1 github.com/yeqown/go-qrcode/writer/standard v1.2.1 + go.lsp.dev/jsonrpc2 v0.10.0 + go.lsp.dev/protocol v0.12.0 + go.lsp.dev/uri v0.3.0 go.uber.org/mock v0.4.0 go.uber.org/multierr v1.11.0 golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa @@ -253,6 +256,8 @@ require ( github.com/russolsen/ohyeah v0.0.0-20160324131710-f4938c005315 // indirect github.com/russolsen/same v0.0.0-20160222130632-f089df61f51d // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/segmentio/asm v1.1.3 // indirect + github.com/segmentio/encoding v0.3.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -275,6 +280,7 @@ require ( github.com/yeqown/reedsolomon v1.0.0 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect + go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect diff --git a/go.sum b/go.sum index 9112614af33..d489fe7ef1b 100644 --- a/go.sum +++ b/go.sum @@ -1936,6 +1936,10 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/segmentio/asm v1.1.3 h1:WM03sfUOENvvKexOLp+pCqgb/WDjsi7EK8gIsICtzhc= +github.com/segmentio/asm v1.1.3/go.mod h1:Ld3L4ZXGNcSLRg4JBsZ3//1+f/TjYl0Mzen/DQy1EJg= +github.com/segmentio/encoding v0.3.4 h1:WM4IBnxH8B9TakiM2QD5LyNl9JSndh88QbHqVC+Pauc= +github.com/segmentio/encoding v0.3.4/go.mod h1:n0JeuIqEQrQoPDGsjo8UNd1iA0U8d8+oHAA4E3G3OxM= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -2221,6 +2225,14 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= +go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= 
+go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 h1:hCzQgh6UcwbKgNSRurYWSqh8MufqRRPODRBblutn4TE= +go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2/go.mod h1:gtSHRuYfbCT0qnbLnovpie/WEmqyJ7T4n6VXiFMBtcw= +go.lsp.dev/protocol v0.12.0 h1:tNprUI9klQW5FAFVM4Sa+AbPFuVQByWhP1ttNUAjIWg= +go.lsp.dev/protocol v0.12.0/go.mod h1:Qb11/HgZQ72qQbeyPfJbu3hZBH23s1sr4st8czGeDMQ= +go.lsp.dev/uri v0.3.0 h1:KcZJmh6nFIBeJzTugn5JTU6OOyG0lDOo3R9KwTxTYbo= +go.lsp.dev/uri v0.3.0/go.mod h1:P5sbO1IQR+qySTWOCnhnK7phBx+W3zbLqSMDJNTw88I= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -2677,6 +2689,7 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/healthmanager/blockchain_health_manager.go b/healthmanager/blockchain_health_manager.go index 7fcdf5d6d92..234ac32d2fb 100644 --- a/healthmanager/blockchain_health_manager.go +++ b/healthmanager/blockchain_health_manager.go @@ -4,6 +4,7 @@ import ( "context" "sync" + status_common "github.com/status-im/status-go/common" "github.com/status-im/status-go/healthmanager/aggregator" "github.com/status-im/status-go/healthmanager/rpcstatus" ) @@ -72,6 +73,7 @@ func (b *BlockchainHealthManager) RegisterProvidersHealthManager(ctx context.Con statusCh := phm.Subscribe() b.wg.Add(1) go func(phm *ProvidersHealthManager, statusCh chan struct{}, providerCtx context.Context) { + defer status_common.LogOnPanic() defer func() { phm.Unsubscribe(statusCh) b.wg.Done() diff --git a/images/decode.go b/images/decode.go index 8e8342738bd..6b25ffae70b 100644 --- a/images/decode.go +++ b/images/decode.go @@ -15,9 +15,10 @@ import ( "time" "unicode/utf8" + "go.uber.org/zap" "golang.org/x/image/webp" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) var ( @@ -66,7 +67,7 @@ func DecodeFromURL(path string) (image.Image, error) { defer func() { if err := res.Body.Close(); err != nil { - log.Error("failed to close profile pic http request body", "err", err) + logutils.ZapLogger().Error("failed to close profile pic http request body", zap.Error(err)) } }() diff --git a/images/manipulation.go b/images/manipulation.go index 033b838374d..48b5845d44e 100644 --- a/images/manipulation.go +++ b/images/manipulation.go @@ -15,7 +15,7 @@ import ( "go.uber.org/zap" xdraw "golang.org/x/image/draw" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) type Circle struct { @@ -48,7 +48,10 @@ func Resize(size ResizeDimension, img image.Image) image.Image { width, height = uint(size), 0 } - log.Info("resizing", "size", size, 
"width", width, "height", height) + logutils.ZapLogger().Info("resizing", + zap.Uint("size", uint(size)), + zap.Uint("width", width), + zap.Uint("height", height)) return resize.Resize(width, height, img, resize.Bilinear) } @@ -264,14 +267,14 @@ func SuperimposeLogoOnQRImage(imageBytes []byte, qrFilepath []byte) []byte { img1, _, err := image.Decode(bytes.NewReader(imageBytes)) if err != nil { - log.Error("error decoding logo Image", zap.Error(err)) + logutils.ZapLogger().Error("error decoding logo Image", zap.Error(err)) return nil } img2, _, err := image.Decode(bytes.NewReader(qrFilepath)) if err != nil { - log.Error("error decoding QR Image", zap.Error(err)) + logutils.ZapLogger().Error("error decoding QR Image", zap.Error(err)) return nil } // Create a new image with the dimensions of the first image @@ -290,7 +293,7 @@ func SuperimposeLogoOnQRImage(imageBytes []byte, qrFilepath []byte) []byte { err = png.Encode(&b, result) if err != nil { - log.Error("error encoding final result Image to Buffer", zap.Error(err)) + logutils.ZapLogger().Error("error encoding final result Image to Buffer", zap.Error(err)) return nil } diff --git a/ipfs/ipfs.go b/ipfs/ipfs.go index 06eae4c505f..0b9203a7511 100644 --- a/ipfs/ipfs.go +++ b/ipfs/ipfs.go @@ -12,10 +12,11 @@ import ( "github.com/ipfs/go-cid" "github.com/wealdtech/go-multicodec" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" ) @@ -214,12 +215,12 @@ func (d *Downloader) download(cid string, download bool) ([]byte, error) { defer func() { if err := resp.Body.Close(); err != nil { - log.Error("failed to close the stickerpack request body", "err", err) + logutils.ZapLogger().Error("failed to close the stickerpack request body", zap.Error(err)) } }() if resp.StatusCode < 200 || resp.StatusCode > 299 { - log.Error("could not load data for", "cid", cid, "code", resp.StatusCode) + logutils.ZapLogger().Error("could not load data for", zap.String("cid", cid), zap.Int("code", resp.StatusCode)) return nil, errors.New("could not load ipfs data") } diff --git a/logutils/custom.go b/logutils/custom.go index b876c08bc4a..6ae7c31b666 100644 --- a/logutils/custom.go +++ b/logutils/custom.go @@ -2,6 +2,7 @@ package logutils import ( "fmt" + "time" "go.uber.org/zap" ) @@ -13,3 +14,11 @@ func WakuMessageTimestamp(key string, value *int64) zap.Field { } return zap.String(key, valueStr) } + +func UnixTimeMs(key string, t time.Time) zap.Field { + return zap.String(key, fmt.Sprintf("%d", t.UnixMilli())) +} + +func UnixTimeNano(key string, t time.Time) zap.Field { + return zap.String(key, fmt.Sprintf("%d", t.UnixNano())) +} diff --git a/logutils/logrotation.go b/logutils/logrotation.go index 40963175fcc..27934b02386 100644 --- a/logutils/logrotation.go +++ b/logutils/logrotation.go @@ -1,6 +1,7 @@ package logutils import ( + "go.uber.org/zap/zapcore" "gopkg.in/natefinch/lumberjack.v2" "github.com/ethereum/go-ethereum/log" @@ -28,3 +29,13 @@ func FileHandlerWithRotation(opts FileOptions, format log.Format) log.Handler { } return log.StreamHandler(logger, format) } + +// ZapSyncerWithRotation creates a zapcore.WriteSyncer with a configured rotation +func ZapSyncerWithRotation(opts FileOptions) zapcore.WriteSyncer { + return zapcore.AddSync(&lumberjack.Logger{ + Filename: opts.Filename, + MaxSize: opts.MaxSize, + MaxBackups: opts.MaxBackups, + Compress: opts.Compress, + }) +} diff --git 
a/logutils/requestlog/request_log.go b/logutils/requestlog/request_log.go index 0b48907c427..cf050943e75 100644 --- a/logutils/requestlog/request_log.go +++ b/logutils/requestlog/request_log.go @@ -2,58 +2,49 @@ package requestlog import ( "errors" - "sync/atomic" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" "github.com/status-im/status-go/logutils" + "github.com/status-im/status-go/protocol/zaputil" ) var ( - // requestLogger is the request logger object - requestLogger log.Logger - // isRequestLoggingEnabled controls whether request logging is enabled - isRequestLoggingEnabled atomic.Bool + requestLogger *zap.Logger ) -// NewRequestLogger creates a new request logger object -func NewRequestLogger(ctx ...interface{}) log.Logger { - requestLogger = log.New(ctx...) +// GetRequestLogger returns the RPC logger object +func GetRequestLogger() *zap.Logger { return requestLogger } -// EnableRequestLogging enables or disables RPC logging -func EnableRequestLogging(enable bool) { - if enable { - isRequestLoggingEnabled.Store(true) - } else { - isRequestLoggingEnabled.Store(false) +func CreateRequestLogger(file string) (*zap.Logger, error) { + if len(file) == 0 { + return nil, errors.New("file is required") } -} -// IsRequestLoggingEnabled returns whether RPC logging is enabled -func IsRequestLoggingEnabled() bool { - return isRequestLoggingEnabled.Load() -} + fileOpts := logutils.FileOptions{ + Filename: file, + MaxBackups: 1, + } -// GetRequestLogger returns the RPC logger object -func GetRequestLogger() log.Logger { - return requestLogger + core := zapcore.NewCore( + zaputil.NewConsoleHexEncoder(zap.NewDevelopmentEncoderConfig()), + zapcore.AddSync(logutils.ZapSyncerWithRotation(fileOpts)), + zap.DebugLevel, + ) + + return zap.New(core).Named("RequestLogger"), nil } func ConfigureAndEnableRequestLogging(file string) error { - log.Info("initialising request logger", "log file", file) - requestLogger := NewRequestLogger() - if file == "" { - return errors.New("log file path is required") - } - fileOpts := logutils.FileOptions{ - Filename: file, - MaxBackups: 1, + logger, err := CreateRequestLogger(file) + if err != nil { + return err } - handler := logutils.FileHandlerWithRotation(fileOpts, log.LogfmtFormat()) - filteredHandler := log.LvlFilterHandler(log.LvlDebug, handler) - requestLogger.SetHandler(filteredHandler) - EnableRequestLogging(true) + + requestLogger = logger + return nil } diff --git a/mailserver/cleaner.go b/mailserver/cleaner.go index 571826fcc92..416b452f4bd 100644 --- a/mailserver/cleaner.go +++ b/mailserver/cleaner.go @@ -4,8 +4,10 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) const ( @@ -38,7 +40,7 @@ func newDBCleaner(db DB, retention time.Duration) *dbCleaner { // Start starts a loop that cleans up old messages. 
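// Each tick prunes envelopes older than the retention window (time.Now() minus c.retention); the loop exits once the cancel channel is closed.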
func (c *dbCleaner) Start() { - log.Info("Starting cleaning envelopes", "period", c.period, "retention", c.retention) + logutils.ZapLogger().Info("Starting cleaning envelopes", zap.Duration("period", c.period), zap.Duration("retention", c.retention)) cancel := make(chan struct{}) @@ -71,9 +73,9 @@ func (c *dbCleaner) schedule(period time.Duration, cancel <-chan struct{}) { case <-t.C: count, err := c.PruneEntriesOlderThan(time.Now().Add(-c.retention)) if err != nil { - log.Error("failed to prune data", "err", err) + logutils.ZapLogger().Error("failed to prune data", zap.Error(err)) } - log.Info("Prunned some some messages successfully", "count", count) + logutils.ZapLogger().Info("Pruned messages successfully", zap.Int("count", count)) case <-cancel: return } diff --git a/mailserver/mailserver.go b/mailserver/mailserver.go index f81ef83c2a2..8e9a5724d07 100644 --- a/mailserver/mailserver.go +++ b/mailserver/mailserver.go @@ -26,14 +26,15 @@ import ( "time" prom "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" gocommon "github.com/status-im/status-go/common" gethbridge "github.com/status-im/status-go/eth-node/bridge/geth" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/waku" wakucommon "github.com/status-im/status-go/waku/common" @@ -144,11 +145,11 @@ func (s *WakuMailServer) DeliverMail(peerID []byte, req *wakucommon.Envelope) { payload, err := s.decodeRequest(peerID, req) if err != nil { deliveryFailuresCounter.WithLabelValues("validation").Inc() - log.Error( + logutils.ZapLogger().Error( "[mailserver:DeliverMail] request failed validation", - "peerID", types.BytesToHash(peerID), - "requestID", req.Hash().String(), - "err", err, + zap.Stringer("peerID", types.BytesToHash(peerID)), + zap.Stringer("requestID", req.Hash()), + zap.Error(err), ) s.ms.sendHistoricMessageErrorResponse(types.BytesToHash(peerID), types.Hash(req.Hash()), err) return @@ -277,12 +278,12 @@ func (s *WakuMailServer) decodeRequest(peerID []byte, request *wakucommon.Envelo decrypted := s.openEnvelope(request) if decrypted == nil { - log.Warn("Failed to decrypt p2p request") + logutils.ZapLogger().Warn("Failed to decrypt p2p request") return payload, errors.New("failed to decrypt p2p request") } if err := checkMsgSignature(decrypted.Src, peerID); err != nil { - log.Warn("Check message signature failed", "err", err.Error()) + logutils.ZapLogger().Warn("Check message signature failed", zap.Error(err)) return payload, fmt.Errorf("check message signature failed: %v", err) } @@ -295,7 +296,7 @@ func (s *WakuMailServer) decodeRequest(peerID []byte, request *wakucommon.Envelo } if payload.Upper < payload.Lower { - log.Error("Query range is invalid: lower > upper", "lower", payload.Lower, "upper", payload.Upper) + logutils.ZapLogger().Error("Query range is invalid: lower > upper", zap.Uint32("lower", payload.Lower), zap.Uint32("upper", payload.Upper)) return payload, errors.New("query range is invalid: lower > upper") } @@ -400,13 +401,13 @@ func newMailServer(cfg Config, adapter adapter, service service) (*mailServer, e // Open database in the last step in order not to init with error // and leave the database open by accident.
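// cfg.PostgresEnabled selects the Postgres backend at cfg.PostgresURI; otherwise the server falls back to LevelDB under cfg.DataDir.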
if cfg.PostgresEnabled { - log.Info("Connecting to postgres database") + logutils.ZapLogger().Info("Connecting to postgres database") database, err := NewPostgresDB(cfg.PostgresURI) if err != nil { return nil, fmt.Errorf("open DB: %s", err) } s.db = database - log.Info("Connected to postgres database") + logutils.ZapLogger().Info("Connected to postgres database") } else { // Defaults to LevelDB database, err := NewLevelDB(cfg.DataDir) @@ -439,7 +440,7 @@ func (s *mailServer) setupCleaner(retention time.Duration) { func (s *mailServer) Archive(env types.Envelope) { err := s.db.SaveEnvelope(env) if err != nil { - log.Error("Could not save envelope", "hash", env.Hash().String()) + logutils.ZapLogger().Error("Could not save envelope", zap.Stringer("hash", env.Hash())) } } @@ -448,34 +449,34 @@ func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPa defer timer.ObserveDuration() deliveryAttemptsCounter.Inc() - log.Info( + logutils.ZapLogger().Info( "[mailserver:DeliverMail] delivering mail", - "peerID", peerID.String(), - "requestID", reqID.String(), + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), ) req.SetDefaults() - log.Info( + logutils.ZapLogger().Info( "[mailserver:DeliverMail] processing request", - "peerID", peerID.String(), - "requestID", reqID.String(), - "lower", req.Lower, - "upper", req.Upper, - "bloom", req.Bloom, - "topics", req.Topics, - "limit", req.Limit, - "cursor", req.Cursor, - "batch", req.Batch, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + zap.Uint32("lower", req.Lower), + zap.Uint32("upper", req.Upper), + zap.Binary("bloom", req.Bloom), + zap.Any("topics", req.Topics), + zap.Uint32("limit", req.Limit), + zap.Binary("cursor", req.Cursor), + zap.Bool("batch", req.Batch), ) if err := req.Validate(); err != nil { syncFailuresCounter.WithLabelValues("req_invalid").Inc() - log.Error( + logutils.ZapLogger().Error( "[mailserver:DeliverMail] request invalid", - "peerID", peerID.String(), - "requestID", reqID.String(), - "err", err, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + zap.Error(err), ) s.sendHistoricMessageErrorResponse(peerID, reqID, fmt.Errorf("request is invalid: %v", err)) return @@ -483,10 +484,10 @@ func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPa if s.exceedsPeerRequests(peerID) { deliveryFailuresCounter.WithLabelValues("peer_req_limit").Inc() - log.Error( + logutils.ZapLogger().Error( "[mailserver:DeliverMail] peer exceeded the limit", - "peerID", peerID.String(), - "requestID", reqID.String(), + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), ) s.sendHistoricMessageErrorResponse(peerID, reqID, fmt.Errorf("rate limit exceeded")) return @@ -498,11 +499,11 @@ func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPa iter, err := s.createIterator(req) if err != nil { - log.Error( + logutils.ZapLogger().Error( "[mailserver:DeliverMail] request failed", - "peerID", peerID.String(), - "requestID", reqID.String(), - "err", err, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + zap.Error(err), ) return } @@ -524,11 +525,11 @@ func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPa counter++ } close(errCh) - log.Info( + logutils.ZapLogger().Info( "[mailserver:DeliverMail] finished sending bundles", - "peerID", peerID, - "requestID", reqID.String(), - "counter", counter, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + 
zap.Int("counter", counter), ) }() @@ -546,11 +547,11 @@ func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPa // Wait for the goroutine to finish the work. It may return an error. if err := <-errCh; err != nil { deliveryFailuresCounter.WithLabelValues("process").Inc() - log.Error( + logutils.ZapLogger().Error( "[mailserver:DeliverMail] error while processing", - "err", err, - "peerID", peerID, - "requestID", reqID, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + zap.Error(err), ) s.sendHistoricMessageErrorResponse(peerID, reqID, err) return @@ -559,29 +560,29 @@ func (s *mailServer) DeliverMail(peerID, reqID types.Hash, req MessagesRequestPa // Processing of the request could be finished earlier due to iterator error. if err := iter.Error(); err != nil { deliveryFailuresCounter.WithLabelValues("iterator").Inc() - log.Error( + logutils.ZapLogger().Error( "[mailserver:DeliverMail] iterator failed", - "err", err, - "peerID", peerID, - "requestID", reqID, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + zap.Error(err), ) s.sendHistoricMessageErrorResponse(peerID, reqID, err) return } - log.Info( + logutils.ZapLogger().Info( "[mailserver:DeliverMail] sending historic message response", - "peerID", peerID, - "requestID", reqID, - "last", lastEnvelopeHash, - "next", nextPageCursor, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + zap.Stringer("last", lastEnvelopeHash), + zap.Binary("next", nextPageCursor), ) s.sendHistoricMessageResponse(peerID, reqID, lastEnvelopeHash, nextPageCursor) } func (s *mailServer) SyncMail(peerID types.Hash, req MessagesRequestPayload) error { - log.Info("Started syncing envelopes", "peer", peerID.String(), "req", req) + logutils.ZapLogger().Info("Started syncing envelopes", zap.Stringer("peer", peerID), zap.Any("req", req)) requestID := fmt.Sprintf("%d-%d", time.Now().UnixNano(), rand.Intn(1000)) // nolint: gosec @@ -590,7 +591,7 @@ func (s *mailServer) SyncMail(peerID types.Hash, req MessagesRequestPayload) err // Check rate limiting for a requesting peer. 
if s.exceedsPeerRequests(peerID) { syncFailuresCounter.WithLabelValues("req_per_sec_limit").Inc() - log.Error("Peer exceeded request per seconds limit", "peerID", peerID.String()) + logutils.ZapLogger().Error("Peer exceeded requests-per-second limit", zap.Stringer("peerID", peerID)) return fmt.Errorf("requests-per-second limit exceeded") } @@ -656,7 +657,7 @@ func (s *mailServer) SyncMail(peerID types.Hash, req MessagesRequestPayload) err return fmt.Errorf("LevelDB iterator failed: %v", err) } - log.Info("Finished syncing envelopes", "peer", peerID.String()) + logutils.ZapLogger().Info("Finished syncing envelopes", zap.Stringer("peer", peerID)) err = s.service.SendSyncResponse( peerID.Bytes(), @@ -674,7 +675,7 @@ func (s *mailServer) SyncMail(peerID types.Hash, req MessagesRequestPayload) err func (s *mailServer) Close() { if s.db != nil { if err := s.db.Close(); err != nil { - log.Error("closing database failed", "err", err) + logutils.ZapLogger().Error("closing database failed", zap.Error(err)) } } if s.rateLimiter != nil { @@ -698,7 +699,7 @@ func (s *mailServer) exceedsPeerRequests(peerID types.Hash) bool { return false } - log.Info("peerID exceeded the number of requests per second", "peerID", peerID.String()) + logutils.ZapLogger().Info("peerID exceeded the number of requests per second", zap.Stringer("peerID", peerID)) return true } @@ -746,10 +747,10 @@ func (s *mailServer) processRequestInBundles( lastEnvelopeHash types.Hash ) - log.Info( + logutils.ZapLogger().Info( "[mailserver:processRequestInBundles] processing request", - "requestID", requestID, - "limit", limit, + zap.String("requestID", requestID), + zap.Int("limit", limit), ) var topicsMap map[types.TopicType]bool @@ -779,10 +780,10 @@ func (s *mailServer) processRequestInBundles( err = errors.New("either topics or bloom must be specified") } if err != nil { - log.Error( + logutils.ZapLogger().Error( "[mailserver:processRequestInBundles] failed to get envelope from iterator", - "err", err, - "requestID", requestID, + zap.String("requestID", requestID), + zap.Error(err), ) continue } @@ -793,9 +794,10 @@ func (s *mailServer) processRequestInBundles( key, err := iter.DBKey() if err != nil { - log.Error( + logutils.ZapLogger().Error( "[mailserver:processRequestInBundles] failed getting key", - "requestID", requestID, + zap.String("requestID", requestID), + zap.Error(err), ) break @@ -839,13 +841,13 @@ func (s *mailServer) processRequestInBundles( processedEnvelopesSize += int64(bundleSize) } - log.Info( + logutils.ZapLogger().Info( "[mailserver:processRequestInBundles] publishing envelopes", - "requestID", requestID, - "batchesCount", len(batches), - "envelopeCount", processedEnvelopes, - "processedEnvelopesSize", processedEnvelopesSize, - "cursor", nextCursor, + zap.String("requestID", requestID), + zap.Int("batchesCount", len(batches)), + zap.Int("envelopeCount", processedEnvelopes), + zap.Int64("processedEnvelopesSize", processedEnvelopesSize), + zap.Binary("cursor", nextCursor), ) // Publish @@ -858,15 +860,15 @@ batchLoop: // the consumer of `output` channel exits prematurely. // In such a case, we should stop pushing batches and exit.
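// A push that blocks longer than `timeout` also aborts the loop, but it is logged as an error below, since a healthy consumer should drain batches promptly.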
case <-cancel: - log.Info( + logutils.ZapLogger().Info( "[mailserver:processRequestInBundles] failed to push all batches", - "requestID", requestID, + zap.String("requestID", requestID), ) break batchLoop case <-time.After(timeout): - log.Error( + logutils.ZapLogger().Error( "[mailserver:processRequestInBundles] timed out pushing a batch", - "requestID", requestID, + zap.String("requestID", requestID), ) break batchLoop } @@ -875,9 +877,9 @@ batchLoop: envelopesCounter.Inc() sentEnvelopeBatchSizeMeter.Observe(float64(processedEnvelopesSize)) - log.Info( + logutils.ZapLogger().Info( "[mailserver:processRequestInBundles] envelopes published", - "requestID", requestID, + zap.String("requestID", requestID), ) close(output) @@ -906,11 +908,11 @@ func (s *mailServer) sendHistoricMessageResponse(peerID, reqID, lastEnvelopeHash err := s.service.SendHistoricMessageResponse(peerID.Bytes(), payload) if err != nil { deliveryFailuresCounter.WithLabelValues("historic_msg_resp").Inc() - log.Error( + logutils.ZapLogger().Error( "[mailserver:DeliverMail] error sending historic message response", - "err", err, - "peerID", peerID, - "requestID", reqID, + zap.Stringer("peerID", peerID), + zap.Stringer("requestID", reqID), + zap.Error(err), ) } } @@ -921,7 +923,7 @@ func (s *mailServer) sendHistoricMessageErrorResponse(peerID, reqID types.Hash, // if we can't report an error, probably something is wrong with p2p connection, // so we just print a log entry to document this sad fact if err != nil { - log.Error("Error while reporting error response", "err", err, "peerID", peerID.String()) + logutils.ZapLogger().Error("Error while reporting error response", zap.Stringer("peerID", peerID), zap.Error(err)) } } diff --git a/mailserver/mailserver_db_leveldb.go b/mailserver/mailserver_db_leveldb.go index c4b60ff2dbc..88f7705be1e 100644 --- a/mailserver/mailserver_db_leveldb.go +++ b/mailserver/mailserver_db_leveldb.go @@ -1,19 +1,19 @@ package mailserver import ( - "fmt" "time" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/util" + "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/status-im/status-go/common" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" waku "github.com/status-im/status-go/waku/common" ) @@ -84,7 +84,7 @@ func NewLevelDB(dataDir string) (*LevelDB, error) { // Open opens an existing leveldb database db, err := leveldb.OpenFile(dataDir, nil) if _, corrupted := err.(*errors.ErrCorrupted); corrupted { - log.Info("database is corrupted trying to recover", "path", dataDir) + logutils.ZapLogger().Info("database is corrupted trying to recover", zap.String("path", dataDir)) db, err = leveldb.RecoverFile(dataDir, nil) } @@ -119,7 +119,7 @@ func (db *LevelDB) GetEnvelope(key *DBKey) ([]byte, error) { func (db *LevelDB) updateArchivedEnvelopesCount() { if count, err := db.envelopesCount(); err != nil { - log.Warn("db query for envelopes count failed", "err", err) + logutils.ZapLogger().Warn("db query for envelopes count failed", zap.Error(err)) } else { archivedEnvelopesGauge.WithLabelValues(db.name).Set(float64(count)) } @@ -210,13 +210,13 @@ func (db *LevelDB) SaveEnvelope(env types.Envelope) error { key := NewDBKey(env.Expiry()-env.TTL(), env.Topic(), env.Hash()) rawEnvelope, err := rlp.EncodeToBytes(env.Unwrap()) if err != nil { - log.Error(fmt.Sprintf("rlp.EncodeToBytes failed: %s", err)) 
+ logutils.ZapLogger().Error("rlp.EncodeToBytes failed", zap.Error(err)) archivedErrorsCounter.WithLabelValues(db.name).Inc() return err } if err = db.ldb.Put(key.Bytes(), rawEnvelope, nil); err != nil { - log.Error(fmt.Sprintf("Writing to DB failed: %s", err)) + logutils.ZapLogger().Error("writing to DB failed", zap.Error(err)) archivedErrorsCounter.WithLabelValues(db.name).Inc() } archivedEnvelopesGauge.WithLabelValues(db.name).Inc() @@ -238,7 +238,9 @@ func recoverLevelDBPanics(calleMethodName string) { // Recover from possible goleveldb panics if r := recover(); r != nil { if errString, ok := r.(string); ok { - log.Error(fmt.Sprintf("recovered from panic in %s: %s", calleMethodName, errString)) + logutils.ZapLogger().Error("recovered from panic", + zap.String("calleMethodName", calleMethodName), + zap.String("errString", errString)) } } } diff --git a/mailserver/mailserver_db_postgres.go b/mailserver/mailserver_db_postgres.go index a2e81635c6b..151aba7690f 100644 --- a/mailserver/mailserver_db_postgres.go +++ b/mailserver/mailserver_db_postgres.go @@ -7,6 +7,7 @@ import ( "time" "github.com/lib/pq" + "go.uber.org/zap" // Import postgres driver _ "github.com/lib/pq" @@ -15,9 +16,9 @@ import ( bindata "github.com/status-im/migrate/v4/source/go_bindata" "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/mailserver/migrations" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/status-im/status-go/eth-node/types" @@ -84,7 +85,7 @@ func (i *PostgresDB) envelopesCount() (int, error) { func (i *PostgresDB) updateArchivedEnvelopesCount() { if count, err := i.envelopesCount(); err != nil { - log.Warn("db query for envelopes count failed", "err", err) + logutils.ZapLogger().Warn("db query for envelopes count failed", zap.Error(err)) } else { archivedEnvelopesGauge.WithLabelValues(i.name).Set(float64(count)) } @@ -262,7 +263,7 @@ func (i *PostgresDB) SaveEnvelope(env types.Envelope) error { key := NewDBKey(env.Expiry()-env.TTL(), topic, env.Hash()) rawEnvelope, err := rlp.EncodeToBytes(env.Unwrap()) if err != nil { - log.Error(fmt.Sprintf("rlp.EncodeToBytes failed: %s", err)) + logutils.ZapLogger().Error("rlp.EncodeToBytes failed", zap.Error(err)) archivedErrorsCounter.WithLabelValues(i.name).Inc() return err } diff --git a/metrics/metrics.go b/metrics/metrics.go index be04df702c7..7a4e4b9c194 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -5,12 +5,16 @@ import ( "net/http" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/metrics" gethprom "github.com/ethereum/go-ethereum/metrics/prometheus" + "github.com/status-im/status-go/logutils" prom "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/status-im/status-go/common" ) // Server runs and controls a HTTP pprof interface. @@ -36,7 +40,7 @@ func healthHandler() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { _, err := w.Write([]byte("OK")) if err != nil { - log.Error("health handler error", "err", err) + logutils.ZapLogger().Error("health handler error", zap.Error(err)) } }) } @@ -55,5 +59,6 @@ func Handler(reg metrics.Registry) http.Handler { // Listen starts the HTTP server in the background. 
 func (p *Server) Listen() {
-	log.Info("metrics server stopped", "err", p.server.ListenAndServe())
+	defer common.LogOnPanic()
+	logutils.ZapLogger().Info("metrics server stopped", zap.Error(p.server.ListenAndServe()))
 }
diff --git a/metrics/node/metrics.go b/metrics/node/metrics.go
index e9fff5fc268..04e65142320 100644
--- a/metrics/node/metrics.go
+++ b/metrics/node/metrics.go
@@ -4,6 +4,8 @@ import (
 	"errors"
 	"strings"

+	"go.uber.org/zap"
+
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"

@@ -71,7 +73,7 @@ func calculatePeerCounts(server *p2p.Server) {
 	for _, p := range peers {
 		labels, err := labelsFromNodeName(p.Fullname())
 		if err != nil {
-			logger.Warn("failed parsing peer name", "error", err, "name", p.Name())
+			logger.Warn("failed parsing peer name", zap.String("name", p.Name()), zap.Error(err))
 			continue
 		}
 		nodePeersGauge.With(labels).Inc()
diff --git a/metrics/node/subscribe.go b/metrics/node/subscribe.go
index 6d61a2e7616..14ecd0c29f1 100644
--- a/metrics/node/subscribe.go
+++ b/metrics/node/subscribe.go
@@ -4,14 +4,16 @@ import (
 	"context"
 	"errors"

-	"github.com/ethereum/go-ethereum/log"
+	"go.uber.org/zap"
+
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"

 	"github.com/status-im/status-go/common"
+	"github.com/status-im/status-go/logutils"
 )

 // All general log messages in this package should be routed through this logger.
-var logger = log.New("package", "status-go/metrics/node")
+var logger = logutils.ZapLogger().Named("metrics.node")

 // SubscribeServerEvents subscribes to server and listens to
 // PeerEventTypeAdd and PeerEventTypeDrop events.
@@ -50,13 +52,13 @@ func SubscribeServerEvents(ctx context.Context, node *node.Node) error {
 			go func() {
 				defer common.LogOnPanic()
 				if err := updateNodeMetrics(node, event.Type); err != nil {
-					logger.Error("failed to update node metrics", "err", err)
+					logger.Error("failed to update node metrics", zap.Error(err))
 				}
 			}()
 		}
 	case err := <-subscription.Err():
 		if err != nil {
-			logger.Error("Subscription failed", "err", err)
+			logger.Error("Subscription failed", zap.Error(err))
 		}
 		return err
 	case <-ctx.Done():
diff --git a/mobile/status_request_log.go b/mobile/callog/status_request_log.go
similarity index 76%
rename from mobile/status_request_log.go
rename to mobile/callog/status_request_log.go
index 2d7cdc2291e..ae2d5f7892d 100644
--- a/mobile/status_request_log.go
+++ b/mobile/callog/status_request_log.go
@@ -1,16 +1,16 @@
-package statusgo
+package callog

 import (
 	"fmt"
 	"reflect"
 	"regexp"
 	"runtime"
-	"runtime/debug"
 	"strings"
 	"time"

-	"github.com/ethereum/go-ethereum/log"
-	"github.com/status-im/status-go/logutils/requestlog"
+	"go.uber.org/zap"
+
+	"github.com/status-im/status-go/logutils"
 )

 var sensitiveKeys = []string{
@@ -46,7 +46,7 @@ func getShortFunctionName(fn any) string {
 	return parts[len(parts)-1]
 }

-// call executes the given function and logs request details if logging is enabled
+// Call executes the given function and logs request details if logging is enabled
 //
 // Parameters:
 // - fn: The function to be executed
@@ -58,21 +58,21 @@ func getShortFunctionName(fn any) string {
 // Functionality:
 // 1. Sets up panic recovery to log and re-panic
 // 2. Records start time if request logging is enabled
-// 3. Uses reflection to call the given function
+// 3. Uses reflection to Call the given function
 // 4. If request logging is enabled, logs method name, parameters, response, and execution duration
 // 5. Removes sensitive information before logging
-func call(fn any, params ...any) any {
+func Call(logger *zap.Logger, fn any, params ...any) any {
 	defer func() {
 		if r := recover(); r != nil {
-			// we're not sure if request logging is enabled here, so we log it use default logger
-			log.Error("panic found in call", "error", r, "stacktrace", string(debug.Stack()))
+			logutils.ZapLogger().Error("panic found in call", zap.Any("error", r), zap.Stack("stacktrace"))
 			panic(r)
 		}
 	}()

 	var startTime time.Time

-	if requestlog.IsRequestLoggingEnabled() {
+	requestLoggingEnabled := logger != nil
+	if requestLoggingEnabled {
 		startTime = time.Now()
 	}

@@ -95,19 +95,25 @@ func call(fn any, params ...any) any {
 		resp = results[0].Interface()
 	}

-	if requestlog.IsRequestLoggingEnabled() {
+	if requestLoggingEnabled {
 		duration := time.Since(startTime)
 		methodName := getShortFunctionName(fn)
 		paramsString := removeSensitiveInfo(fmt.Sprintf("%+v", params))
 		respString := removeSensitiveInfo(fmt.Sprintf("%+v", resp))
-		requestlog.GetRequestLogger().Debug(methodName, "params", paramsString, "resp", respString, "duration", duration)
+
+		logger.Debug("call",
+			zap.String("method", methodName),
+			zap.String("params", paramsString),
+			zap.String("resp", respString),
+			zap.Duration("duration", duration),
+		)
 	}

 	return resp
 }

-func callWithResponse(fn any, params ...any) string {
-	resp := call(fn, params...)
+func CallWithResponse(logger *zap.Logger, fn any, params ...any) string {
+	resp := Call(logger, fn, params...)
 	if resp == nil {
 		return ""
 	}
diff --git a/mobile/status_request_log_test.go b/mobile/callog/status_request_log_test.go
similarity index 70%
rename from mobile/status_request_log_test.go
rename to mobile/callog/status_request_log_test.go
index a1835569eff..658ab82f951 100644
--- a/mobile/status_request_log_test.go
+++ b/mobile/callog/status_request_log_test.go
@@ -1,19 +1,16 @@
-package statusgo
+package callog

 import (
-	"encoding/json"
 	"fmt"
+	"os"
 	"strings"
 	"testing"

+	"github.com/ethereum/go-ethereum/log"
+
 	"github.com/stretchr/testify/require"

 	"github.com/status-im/status-go/logutils/requestlog"
-	"github.com/status-im/status-go/multiaccounts"
-	"github.com/status-im/status-go/multiaccounts/settings"
-	"github.com/status-im/status-go/signal"
-
-	"github.com/ethereum/go-ethereum/log"
 )

 func TestRemoveSensitiveInfo(t *testing.T) {
@@ -60,17 +57,14 @@ func TestRemoveSensitiveInfo(t *testing.T) {
 }

 func TestCall(t *testing.T) {
-	// Enable request logging
-	requestlog.EnableRequestLogging(true)
+	// Create a temporary file for logging
+	tempLogFile, err := os.CreateTemp(t.TempDir(), "TestCall*.log")
+	require.NoError(t, err)

-	// Create a mock logger to capture log output
-	var logOutput string
-	mockLogger := log.New()
-	mockLogger.SetHandler(log.FuncHandler(func(r *log.Record) error {
-		logOutput += r.Msg + fmt.Sprintf("%s", r.Ctx...)
-		return nil
-	}))
-	requestlog.NewRequestLogger().SetHandler(mockLogger.GetHandler())
+	// Enable request logging
+	logger, err := requestlog.CreateRequestLogger(tempLogFile.Name())
+	require.NoError(t, err)
+	require.NotNil(t, logger)

 	// Test case 1: Normal execution
 	testFunc := func(param string) string {
@@ -79,13 +73,18 @@ func TestCall(t *testing.T) {
 	testParam := "test input"
 	expectedResult := "test result: test input"

-	result := callWithResponse(testFunc, testParam)
+	result := CallWithResponse(logger, testFunc, testParam)

 	// Check the result
 	if result != expectedResult {
 		t.Errorf("Expected result %s, got %s", expectedResult, result)
 	}

+	// Read the log file
+	logData, err := os.ReadFile(tempLogFile.Name())
+	require.NoError(t, err)
+	logOutput := string(logData)
+
 	// Check if the log contains expected information
 	expectedLogParts := []string{getShortFunctionName(testFunc), "params", testParam, "resp", expectedResult}
 	for _, part := range expectedLogParts {
@@ -94,19 +93,27 @@ func TestCall(t *testing.T) {
 		}
 	}

+	// Create a mock logger to capture log output
+	mockLogger := log.New()
+	mockLogger.SetHandler(log.FuncHandler(func(r *log.Record) error {
+		logOutput += r.Msg + fmt.Sprintf("%s", r.Ctx...)
+		return nil
+	}))
+
 	// Test case 2: Panic -> recovery -> re-panic
 	oldRootHandler := log.Root().GetHandler()
 	defer log.Root().SetHandler(oldRootHandler)
 	log.Root().SetHandler(mockLogger.GetHandler())

 	// Clear log output for next test
 	logOutput = ""
+
 	e := "test panic"
 	panicFunc := func() {
 		panic(e)
 	}

 	require.PanicsWithValue(t, e, func() {
-		call(panicFunc)
+		Call(logger, panicFunc)
 	})

 	// Check if the panic was logged
@@ -121,35 +128,11 @@ func TestCall(t *testing.T) {
 	}
 }

+func initializeApplication(requestJSON string) string {
+	return ""
+}
+
 func TestGetFunctionName(t *testing.T) {
 	fn := getShortFunctionName(initializeApplication)
 	require.Equal(t, "initializeApplication", fn)
 }
-
-type testSignalHandler struct {
-	receivedSignal string
-}
-
-func (t *testSignalHandler) HandleSignal(data string) {
-	t.receivedSignal = data
-}
-
-func TestSetMobileSignalHandler(t *testing.T) {
-	// Setup
-	handler := &testSignalHandler{}
-	SetMobileSignalHandler(handler)
-	t.Cleanup(signal.ResetMobileSignalHandler)
-
-	// Test data
-	testAccount := &multiaccounts.Account{Name: "test"}
-	testSettings := &settings.Settings{KeyUID: "0x1"}
-	testEnsUsernames := json.RawMessage(`{"test": "test"}`)
-
-	// Action
-	signal.SendLoggedIn(testAccount, testSettings, testEnsUsernames, nil)
-
-	// Assertions
-	require.Contains(t, handler.receivedSignal, `"key-uid":"0x1"`, "Signal should contain the correct KeyUID")
-	require.Contains(t, handler.receivedSignal, `"name":"test"`, "Signal should contain the correct account name")
-	require.Contains(t, handler.receivedSignal, `"ensUsernames":{"test":"test"}`, "Signal should contain the correct ENS usernames")
-}
diff --git a/mobile/init_logging_test.go b/mobile/init_logging_test.go
index 95b37bfe3bc..4f2159b7140 100644
--- a/mobile/init_logging_test.go
+++ b/mobile/init_logging_test.go
@@ -22,7 +22,7 @@ func TestInitLogging(t *testing.T) {
 	require.Equal(t, `{"error":""}`, response)
 	_, err := os.Stat(gethLogFile)
 	require.NoError(t, err)
-	require.True(t, requestlog.IsRequestLoggingEnabled())
+	require.NotNil(t, requestlog.GetRequestLogger())

 	// requests log file should not be created yet
 	_, err = os.Stat(requestsLogFile)
diff --git a/mobile/status.go b/mobile/status.go
index a121d85b4df..9eb942d7aea 100644
--- a/mobile/status.go
+++ b/mobile/status.go
@@ -7,9 +7,9 @@
 import (
 	"fmt"
 	"unsafe"

+	"go.uber.org/zap"
 	validator "gopkg.in/go-playground/validator.v9"

-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/signer/core/apitypes"

 	"github.com/status-im/zxcvbn-go"
@@ -47,8 +47,18 @@ import (
 	"github.com/status-im/status-go/services/typeddata"
 	"github.com/status-im/status-go/signal"
 	"github.com/status-im/status-go/transactions"
+
+	"github.com/status-im/status-go/mobile/callog"
 )

+func call(fn any, params ...any) any {
+	return callog.Call(requestlog.GetRequestLogger(), fn, params...)
+}
+
+func callWithResponse(fn any, params ...any) string {
+	return callog.CallWithResponse(requestlog.GetRequestLogger(), fn, params...)
+}
+
 type InitializeApplicationResponse struct {
 	Accounts               []multiaccounts.Account         `json:"accounts"`
 	CentralizedMetricsInfo *centralizedmetrics.MetricsInfo `json:"centralizedMetricsInfo"`
@@ -366,19 +376,19 @@ func login(accountData, password, configJSON string) error {
 	}

 	api.RunAsync(func() error {
-		log.Debug("start a node with account", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("start a node with account", zap.String("key-uid", account.KeyUID))
 		err := statusBackend.UpdateNodeConfigFleet(account, password, &conf)
 		if err != nil {
-			log.Error("failed to update node config fleet", "key-uid", account.KeyUID, "error", err)
+			logutils.ZapLogger().Error("failed to update node config fleet", zap.String("key-uid", account.KeyUID), zap.Error(err))
 			return statusBackend.LoggedIn(account.KeyUID, err)
 		}

 		err = statusBackend.StartNodeWithAccount(account, password, &conf, nil)
 		if err != nil {
-			log.Error("failed to start a node", "key-uid", account.KeyUID, "error", err)
+			logutils.ZapLogger().Error("failed to start a node", zap.String("key-uid", account.KeyUID), zap.Error(err))
 			return err
 		}
-		log.Debug("started a node with", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("started a node with", zap.String("key-uid", account.KeyUID))
 		return nil
 	})

@@ -431,18 +441,27 @@ func createAccountAndLogin(requestJSON string) string {
 	}

 	api.RunAsync(func() error {
-		log.Debug("starting a node and creating config")
+		logutils.ZapLogger().Debug("starting a node and creating config")
 		_, err := statusBackend.CreateAccountAndLogin(&request)
 		if err != nil {
-			log.Error("failed to create account", "error", err)
+			logutils.ZapLogger().Error("failed to create account", zap.Error(err))
 			return err
 		}
-		log.Debug("started a node, and created account")
+		logutils.ZapLogger().Debug("started a node, and created account")
 		return nil
 	})
 	return makeJSONResponse(nil)
 }

+func AcceptTerms() string {
+	return callWithResponse(acceptTerms)
+}
+
+func acceptTerms() string {
+	err := statusBackend.AcceptTerms()
+	return makeJSONResponse(err)
+}
+
 func LoginAccount(requestJSON string) string {
 	return callWithResponse(loginAccount, requestJSON)
 }
@@ -462,10 +481,10 @@ func loginAccount(requestJSON string) string {
 	api.RunAsync(func() error {
 		err := statusBackend.LoginAccount(&request)
 		if err != nil {
-			log.Error("loginAccount failed", "error", err)
+			logutils.ZapLogger().Error("loginAccount failed", zap.Error(err))
 			return err
 		}
-		log.Debug("loginAccount started node")
+		logutils.ZapLogger().Debug("loginAccount started node")
 		return nil
 	})
 	return makeJSONResponse(nil)
@@ -488,7 +507,7 @@ func restoreAccountAndLogin(requestJSON string) string {
 	}

 	api.RunAsync(func() error {
-		log.Debug("starting a node and restoring account")
+		logutils.ZapLogger().Debug("starting a node and restoring account")

 		if request.Keycard != nil {
 			_, err = statusBackend.RestoreKeycardAccountAndLogin(&request)
@@ -497,10 +516,10 @@ func restoreAccountAndLogin(requestJSON string) string {
 		}

 		if err != nil {
-			log.Error("failed to restore account", "error", err)
+			logutils.ZapLogger().Error("failed to restore account", zap.Error(err))
 			return err
 		}
-		log.Debug("started a node, and restored account")
+		logutils.ZapLogger().Debug("started a node, and restored account")
 		return nil
 	})

@@ -537,13 +556,13 @@ func SaveAccountAndLogin(accountData, password, settingsJSON, configJSON, subacc
 	}

 	api.RunAsync(func() error {
-		log.Debug("starting a node, and saving account with configuration", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("starting a node, and saving account with configuration", zap.String("key-uid", account.KeyUID))
 		err := statusBackend.StartNodeWithAccountAndInitialConfig(account, password, settings, &conf, subaccs, nil)
 		if err != nil {
-			log.Error("failed to start node and save account", "key-uid", account.KeyUID, "error", err)
+			logutils.ZapLogger().Error("failed to start node and save account", zap.String("key-uid", account.KeyUID), zap.Error(err))
 			return err
 		}
-		log.Debug("started a node, and saved account", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("started a node, and saved account", zap.String("key-uid", account.KeyUID))
 		return nil
 	})
 	return makeJSONResponse(nil)
@@ -625,13 +644,13 @@ func SaveAccountAndLoginWithKeycard(accountData, password, settingsJSON, configJ
 	}

 	api.RunAsync(func() error {
-		log.Debug("starting a node, and saving account with configuration", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("starting a node, and saving account with configuration", zap.String("key-uid", account.KeyUID))
 		err := statusBackend.SaveAccountAndStartNodeWithKey(account, password, settings, &conf, subaccs, keyHex)
 		if err != nil {
-			log.Error("failed to start node and save account", "key-uid", account.KeyUID, "error", err)
+			logutils.ZapLogger().Error("failed to start node and save account", zap.String("key-uid", account.KeyUID), zap.Error(err))
 			return err
 		}
-		log.Debug("started a node, and saved account", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("started a node, and saved account", zap.String("key-uid", account.KeyUID))
 		return nil
 	})
 	return makeJSONResponse(nil)
@@ -652,13 +671,13 @@ func LoginWithKeycard(accountData, password, keyHex string, configJSON string) s
 		return makeJSONResponse(err)
 	}
 	api.RunAsync(func() error {
-		log.Debug("start a node with account", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("start a node with account", zap.String("key-uid", account.KeyUID))
 		err := statusBackend.StartNodeWithKey(account, password, keyHex, &conf)
 		if err != nil {
-			log.Error("failed to start a node", "key-uid", account.KeyUID, "error", err)
+			logutils.ZapLogger().Error("failed to start a node", zap.String("key-uid", account.KeyUID), zap.Error(err))
 			return err
 		}
-		log.Debug("started a node with", "key-uid", account.KeyUID)
+		logutils.ZapLogger().Debug("started a node with", zap.String("key-uid", account.KeyUID))
 		return nil
 	})
 	return makeJSONResponse(nil)
@@ -946,7 +965,7 @@ func writeHeapProfile(dataDir string) string { //nolint: deadcode
 func makeJSONResponse(err error) string {
 	errString := ""
 	if err != nil {
-		log.Error("error in makeJSONResponse", "error", err)
+		logutils.ZapLogger().Error("error in makeJSONResponse", zap.Error(err))
 		errString = err.Error()
 	}

@@ -1641,7 +1660,7 @@ func EncodeTransfer(to string, value string) string {
 func encodeTransfer(to string, value string) string {
 	result, err := abi_spec.EncodeTransfer(to, value)
 	if err != nil {
-		log.Error("failed to encode transfer", "to", to, "value", value, "error", err)
+		logutils.ZapLogger().Error("failed to encode transfer", zap.String("to", to), zap.String("value", value), zap.Error(err))
 		return ""
 	}
 	return result
@@ -1654,7 +1673,7 @@ func EncodeFunctionCall(method string, paramsJSON string) string {
 func encodeFunctionCall(method string, paramsJSON string) string {
 	result, err := abi_spec.Encode(method, paramsJSON)
 	if err != nil {
-		log.Error("failed to encode function call", "method", method, "paramsJSON", paramsJSON, "error", err)
+		logutils.ZapLogger().Error("failed to encode function call", zap.String("method", method), zap.String("paramsJSON", paramsJSON), zap.Error(err))
 		return ""
 	}
 	return result
@@ -1671,17 +1690,17 @@ func decodeParameters(decodeParamJSON string) string {
 	}{}
 	err := json.Unmarshal([]byte(decodeParamJSON), &decodeParam)
 	if err != nil {
-		log.Error("failed to unmarshal json when decoding parameters", "decodeParamJSON", decodeParamJSON, "error", err)
+		logutils.ZapLogger().Error("failed to unmarshal json when decoding parameters", zap.String("decodeParamJSON", decodeParamJSON), zap.Error(err))
 		return ""
 	}
 	result, err := abi_spec.Decode(decodeParam.BytesString, decodeParam.Types)
 	if err != nil {
-		log.Error("failed to decode parameters", "decodeParamJSON", decodeParamJSON, "error", err)
+		logutils.ZapLogger().Error("failed to decode parameters", zap.String("decodeParamJSON", decodeParamJSON), zap.Error(err))
 		return ""
 	}
 	bytes, err := json.Marshal(result)
 	if err != nil {
-		log.Error("failed to marshal result", "result", result, "decodeParamJSON", decodeParamJSON, "error", err)
+		logutils.ZapLogger().Error("failed to marshal result", zap.Any("result", result), zap.String("decodeParamJSON", decodeParamJSON), zap.Error(err))
 		return ""
 	}
 	return string(bytes)
@@ -1714,7 +1733,7 @@ func Utf8ToHex(str string) string {
 func utf8ToHex(str string) string {
 	hexString, err := abi_spec.Utf8ToHex(str)
 	if err != nil {
-		log.Error("failed to convert utf8 to hex", "str", str, "error", err)
+		logutils.ZapLogger().Error("failed to convert utf8 to hex", zap.String("str", str), zap.Error(err))
 	}
 	return hexString
 }
@@ -1726,7 +1745,7 @@ func HexToUtf8(hexString string) string {
 func hexToUtf8(hexString string) string {
 	str, err := abi_spec.HexToUtf8(hexString)
 	if err != nil {
-		log.Error("failed to convert hex to utf8", "hexString", hexString, "error", err)
+		logutils.ZapLogger().Error("failed to convert hex to utf8", zap.String("hexString", hexString), zap.Error(err))
 	}
 	return str
 }
@@ -1738,7 +1757,7 @@ func CheckAddressChecksum(address string) string {
 func checkAddressChecksum(address string) string {
 	valid, err := abi_spec.CheckAddressChecksum(address)
 	if err != nil {
-		log.Error("failed to invoke check address checksum", "address", address, "error", err)
+		logutils.ZapLogger().Error("failed to invoke check address checksum", zap.String("address", address), zap.Error(err))
 	}
 	result, _ := json.Marshal(valid)
 	return string(result)
@@ -1751,7 +1770,7 @@ func IsAddress(address string) string {
 func isAddress(address string) string {
 	valid, err := abi_spec.IsAddress(address)
 	if err != nil {
-		log.Error("failed to invoke IsAddress", "address", address, "error", err)
+		logutils.ZapLogger().Error("failed to invoke IsAddress", zap.String("address", address), zap.Error(err))
 	}
 	result, _ := json.Marshal(valid)
 	return string(result)
@@ -1764,7 +1783,7 @@ func ToChecksumAddress(address string) string {
 func toChecksumAddress(address string) string {
 	address, err := abi_spec.ToChecksumAddress(address)
 	if err != nil {
-		log.Error("failed to convert to checksum address", "address", address, "error", err)
+		logutils.ZapLogger().Error("failed to convert to checksum address", zap.String("address", address), zap.Error(err))
 	}
 	return address
 }
@@ -1796,7 +1815,7 @@ func InitLogging(logSettingsJSON string) string {
 	}

 	if err = logutils.OverrideRootLogWithConfig(logSettings.LogSettings, false); err == nil {
-		log.Info("logging initialised", "logSettings", logSettingsJSON)
+		logutils.ZapLogger().Info("logging initialised", zap.String("logSettings", logSettingsJSON))
 	}

 	if logSettings.LogRequestGo {
diff --git a/mobile/status_geth.go b/mobile/status_geth.go
index 9a6500c9fed..b450f06fca0 100644
--- a/mobile/status_geth.go
+++ b/mobile/status_geth.go
@@ -2,6 +2,7 @@ package statusgo

 import (
 	"github.com/status-im/status-go/api"
+	"github.com/status-im/status-go/logutils"
 )

-var statusBackend = api.NewGethStatusBackend()
+var statusBackend = api.NewGethStatusBackend(logutils.ZapLogger())
diff --git a/mobile/status_test.go b/mobile/status_test.go
new file mode 100644
index 00000000000..eef81854d41
--- /dev/null
+++ b/mobile/status_test.go
@@ -0,0 +1,40 @@
+package statusgo
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/status-im/status-go/multiaccounts"
+	"github.com/status-im/status-go/multiaccounts/settings"
+	"github.com/status-im/status-go/signal"
+)
+
+type testSignalHandler struct {
+	receivedSignal string
+}
+
+func (t *testSignalHandler) HandleSignal(data string) {
+	t.receivedSignal = data
+}
+
+func TestSetMobileSignalHandler(t *testing.T) {
+	// Setup
+	handler := &testSignalHandler{}
+	SetMobileSignalHandler(handler)
+	t.Cleanup(signal.ResetMobileSignalHandler)
+
+	// Test data
+	testAccount := &multiaccounts.Account{Name: "test"}
+	testSettings := &settings.Settings{KeyUID: "0x1"}
+	testEnsUsernames := json.RawMessage(`{"test": "test"}`)
+
+	// Action
+	signal.SendLoggedIn(testAccount, testSettings, testEnsUsernames, nil)
+
+	// Assertions
+	require.Contains(t, handler.receivedSignal, `"key-uid":"0x1"`, "Signal should contain the correct KeyUID")
+	require.Contains(t, handler.receivedSignal, `"name":"test"`, "Signal should contain the correct account name")
+	require.Contains(t, handler.receivedSignal, `"ensUsernames":{"test":"test"}`, "Signal should contain the correct ENS usernames")
+}
diff --git a/multiaccounts/database.go b/multiaccounts/database.go
index f3b427409f1..b466a01f682 100644
--- a/multiaccounts/database.go
+++ b/multiaccounts/database.go
@@ -5,9 +5,9 @@ import (
 	"database/sql"
 	"encoding/json"

-	"github.com/ethereum/go-ethereum/log"
 	"github.com/status-im/status-go/common/dbsetup"
 	"github.com/status-im/status-go/images"
+	"github.com/status-im/status-go/logutils"
 	"github.com/status-im/status-go/multiaccounts/common"
 	"github.com/status-im/status-go/multiaccounts/migrations"
 	"github.com/status-im/status-go/protocol/protobuf"
@@ -29,6 +29,9 @@ type Account struct {
 	Images                  []images.IdentityImage `json:"images"`
 	KDFIterations           int                    `json:"kdfIterations,omitempty"`
 	CustomizationColorClock uint64                 `json:"-"`
+
+	// HasAcceptedTerms will be set to true when the first account is created.
+	HasAcceptedTerms bool `json:"hasAcceptedTerms"`
 }

 func (a *Account) RefersToKeycard() bool {
@@ -145,7 +148,7 @@ func (db *Database) GetAccountKDFIterationsNumber(keyUID string) (kdfIterationsN
 }

 func (db *Database) GetAccounts() (rst []Account, err error) {
-	rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = a.keyUid ORDER BY loginTimestamp DESC")
+	rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, a.hasAcceptedTerms, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = a.keyUid ORDER BY loginTimestamp DESC")
 	if err != nil {
 		return nil, err
 	}
@@ -179,6 +182,7 @@ func (db *Database) GetAccounts() (rst []Account, err error) {
 			&acc.KeycardPairing,
 			&acc.KeyUID,
 			&acc.KDFIterations,
+			&acc.HasAcceptedTerms,
 			&iiName,
 			&ii.Payload,
 			&iiWidth,
@@ -236,8 +240,14 @@ func (db *Database) GetAccounts() (rst []Account, err error) {
 	return rst, nil
 }

+func (db *Database) GetAccountsCount() (int, error) {
+	var count int
+	err := db.db.QueryRow("SELECT COUNT(1) FROM accounts").Scan(&count)
+	return count, err
+}
+
 func (db *Database) GetAccount(keyUID string) (*Account, error) {
-	rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, ii.key_uid, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = a.keyUid WHERE a.keyUid = ? ORDER BY loginTimestamp DESC", keyUID)
+	rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, a.hasAcceptedTerms, ii.key_uid, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = a.keyUid WHERE a.keyUid = ? ORDER BY loginTimestamp DESC", keyUID)
 	if err != nil {
 		return nil, err
 	}
@@ -273,6 +283,7 @@ func (db *Database) GetAccount(keyUID string) (*Account, error) {
 			&acc.KeycardPairing,
 			&acc.KeyUID,
 			&acc.KDFIterations,
+			&acc.HasAcceptedTerms,
 			&iiKeyUID,
 			&iiName,
 			&ii.Payload,
@@ -323,7 +334,7 @@ func (db *Database) SaveAccount(account Account) error {
 		account.KDFIterations = dbsetup.ReducedKDFIterationsNumber
 	}

-	_, err = db.db.Exec("INSERT OR REPLACE INTO accounts (name, identicon, colorHash, colorId, customizationColor, customizationColorClock, keycardPairing, keyUid, kdfIterations, loginTimestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KeyUID, account.KDFIterations, account.Timestamp)
+	_, err = db.db.Exec("INSERT OR REPLACE INTO accounts (name, identicon, colorHash, colorId, customizationColor, customizationColorClock, keycardPairing, keyUid, kdfIterations, loginTimestamp, hasAcceptedTerms) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KeyUID, account.KDFIterations, account.Timestamp, account.HasAcceptedTerms)
 	if err != nil {
 		return err
 	}
@@ -340,6 +351,11 @@ func (db *Database) UpdateDisplayName(keyUID string, displayName string) error {
 	return err
 }

+func (db *Database) UpdateHasAcceptedTerms(keyUID string, hasAcceptedTerms bool) error {
+	_, err := db.db.Exec("UPDATE accounts SET hasAcceptedTerms = ? WHERE keyUid = ?", hasAcceptedTerms, keyUID)
+	return err
+}
+
 func (db *Database) UpdateAccount(account Account) error {
 	colorHash, err := json.Marshal(account.ColorHash)
 	if err != nil {
@@ -350,7 +366,7 @@ func (db *Database) UpdateAccount(account Account) error {
 		account.KDFIterations = dbsetup.ReducedKDFIterationsNumber
 	}

-	_, err = db.db.Exec("UPDATE accounts SET name = ?, identicon = ?, colorHash = ?, colorId = ?, customizationColor = ?, customizationColorClock = ?, keycardPairing = ?, kdfIterations = ? WHERE keyUid = ?", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KDFIterations, account.KeyUID)
+	_, err = db.db.Exec("UPDATE accounts SET name = ?, identicon = ?, colorHash = ?, colorId = ?, customizationColor = ?, customizationColorClock = ?, keycardPairing = ?, kdfIterations = ?, hasAcceptedTerms = ? WHERE keyUid = ?", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KDFIterations, account.HasAcceptedTerms, account.KeyUID)
 	return err
 }
@@ -468,7 +484,7 @@ func (db *Database) publishOnIdentityImageSubscriptions(change *IdentityImageSub
 		select {
 		case s <- change:
 		default:
-			log.Warn("subscription channel full, dropping message")
+			logutils.ZapLogger().Warn("subscription channel full, dropping message")
 		}
 	}
 }
diff --git a/multiaccounts/database_test.go b/multiaccounts/database_test.go
index a4cdce33dba..a6647ea6c0a 100644
--- a/multiaccounts/database_test.go
+++ b/multiaccounts/database_test.go
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"io/ioutil"
 	"os"
+	"strings"
 	"testing"

 	"github.com/status-im/status-go/common/dbsetup"
@@ -39,10 +40,17 @@ func TestAccounts(t *testing.T) {
 func TestAccountsUpdate(t *testing.T) {
 	db, stop := setupTestDB(t)
 	defer stop()
-	expected := Account{KeyUID: "string", CustomizationColor: common.CustomizationColorBlue, ColorHash: ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}}, ColorID: 10, KDFIterations: dbsetup.ReducedKDFIterationsNumber}
+	expected := Account{
+		KeyUID:             "string",
+		CustomizationColor: common.CustomizationColorBlue,
+		ColorHash:          ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}},
+		ColorID:            10,
+		KDFIterations:      dbsetup.ReducedKDFIterationsNumber,
+	}
 	require.NoError(t, db.SaveAccount(expected))
 	expected.Name = "chars"
 	expected.CustomizationColor = common.CustomizationColorMagenta
+	expected.HasAcceptedTerms = true
 	require.NoError(t, db.UpdateAccount(expected))
 	rst, err := db.GetAccounts()
 	require.NoError(t, err)
@@ -50,6 +58,53 @@ func TestAccountsUpdate(t *testing.T) {
 	require.Equal(t, expected, rst[0])
 }

+func TestUpdateHasAcceptedTerms(t *testing.T) {
+	db, stop := setupTestDB(t)
+	defer stop()
+	keyUID := "string"
+	expected := Account{
+		KeyUID:        keyUID,
+		KDFIterations: dbsetup.ReducedKDFIterationsNumber,
+	}
+	require.NoError(t, db.SaveAccount(expected))
+	accounts, err := db.GetAccounts()
+	require.NoError(t, err)
+	require.Equal(t, []Account{expected}, accounts)
+
+	// Update from false -> true
+	require.NoError(t, db.UpdateHasAcceptedTerms(keyUID, true))
+	account, err := db.GetAccount(keyUID)
+	require.NoError(t, err)
+	expected.HasAcceptedTerms = true
+	require.Equal(t, &expected, account)
+
+	// Update from true -> false
+	require.NoError(t, db.UpdateHasAcceptedTerms(keyUID, false))
+	account, err = db.GetAccount(keyUID)
+	require.NoError(t, err)
+	expected.HasAcceptedTerms = false
+	require.Equal(t, &expected, account)
+}
+
+func TestDatabase_GetAccountsCount(t *testing.T) {
+	db, stop := setupTestDB(t)
+	defer stop()
+
+	count, err := db.GetAccountsCount()
+	require.NoError(t, err)
+	require.Equal(t, 0, count)
+
+	account := Account{
+		KeyUID:        keyUID,
+		KDFIterations: dbsetup.ReducedKDFIterationsNumber,
+	}
+	require.NoError(t, db.SaveAccount(account))
+
+	count, err = db.GetAccountsCount()
+	require.NoError(t, err)
+	require.Equal(t, 1, count)
+}
+
 func TestLoginUpdate(t *testing.T) {
 	db, stop := setupTestDB(t)
 	defer stop()
@@ -148,20 +203,26 @@ func TestDatabase_DeleteIdentityImage(t *testing.T) {
 	require.Empty(t, oii)
 }

+func removeAllWhitespace(s string) string {
+	tmp := strings.ReplaceAll(s, " ", "")
+	tmp = strings.ReplaceAll(tmp, "\n", "")
+	tmp = strings.ReplaceAll(tmp, "\t", "")
+	return tmp
+}
+
 func TestDatabase_GetAccountsWithIdentityImages(t *testing.T) {
 	db, stop := setupTestDB(t)
 	defer stop()
 	testAccs := []Account{
-		{Name: "string", KeyUID: keyUID, Identicon: "data"},
+		{Name: "string", KeyUID: keyUID, Identicon: "data", HasAcceptedTerms: true},
 		{Name: "string", KeyUID: keyUID2},
 		{Name: "string", KeyUID: keyUID2 + "2"},
 		{Name: "string", KeyUID: keyUID2 + "3"},
 	}
-	expected := `[{"name":"string","timestamp":100,"identicon":"data","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0xdeadbeef","images":[{"keyUid":"0xdeadbeef","type":"large","uri":"data:image/png;base64,iVBORw0KGgoAAAANSUg=","width":240,"height":300,"fileSize":1024,"resizeTarget":240,"clock":0},{"keyUid":"0xdeadbeef","type":"thumbnail","uri":"data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=","width":80,"height":80,"fileSize":256,"resizeTarget":80,"clock":0}],"kdfIterations":3200},{"name":"string","timestamp":10,"identicon":"","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0x1337beef","images":null,"kdfIterations":3200},{"name":"string","timestamp":0,"identicon":"","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0x1337beef2","images":null,"kdfIterations":3200},{"name":"string","timestamp":0,"identicon":"","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0x1337beef3","images":[{"keyUid":"0x1337beef3","type":"large","uri":"data:image/png;base64,iVBORw0KGgoAAAANSUg=","width":240,"height":300,"fileSize":1024,"resizeTarget":240,"clock":0},{"keyUid":"0x1337beef3","type":"thumbnail","uri":"data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=","width":80,"height":80,"fileSize":256,"resizeTarget":80,"clock":0}],"kdfIterations":3200}]`
 	for _, a := range testAccs {
-		require.NoError(t, db.SaveAccount(a))
+		require.NoError(t, db.SaveAccount(a), a.KeyUID)
 	}

 	seedTestDBWithIdentityImages(t, db, keyUID)
@@ -178,14 +239,116 @@ func TestDatabase_GetAccountsWithIdentityImages(t *testing.T) {
 	accJSON, err := json.Marshal(accs)
 	require.NoError(t, err)

-	require.Exactly(t, expected, string(accJSON))
+	expected := `
+[
+  {
+    "name": "string",
+    "timestamp": 100,
+    "identicon": "data",
+    "colorHash": null,
+    "colorId": 0,
+    "keycard-pairing": "",
+    "key-uid": "0xdeadbeef",
+    "images": [
+      {
+        "keyUid": "0xdeadbeef",
+        "type": "large",
+        "uri": "data:image/png;base64,iVBORw0KGgoAAAANSUg=",
+        "width": 240,
+        "height": 300,
+        "fileSize": 1024,
+        "resizeTarget": 240,
+        "clock": 0
+      },
+      {
+        "keyUid": "0xdeadbeef",
+        "type": "thumbnail",
+        "uri": "data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=",
+        "width": 80,
+        "height": 80,
+        "fileSize": 256,
+        "resizeTarget": 80,
+        "clock": 0
+      }
+    ],
+    "kdfIterations": 3200,
+    "hasAcceptedTerms": true
+  },
+  {
+    "name": "string",
+    "timestamp": 10,
+    "identicon": "",
+    "colorHash": null,
+    "colorId": 0,
+    "keycard-pairing": "",
+    "key-uid": "0x1337beef",
+    "images": null,
+    "kdfIterations": 3200,
+    "hasAcceptedTerms": false
+  },
+  {
+    "name": "string",
+    "timestamp": 0,
+    "identicon": "",
+    "colorHash": null,
+    "colorId": 0,
+    "keycard-pairing": "",
+    "key-uid": "0x1337beef2",
+    "images": null,
+    "kdfIterations": 3200,
+    "hasAcceptedTerms": false
+  },
+  {
+    "name": "string",
+    "timestamp": 0,
+    "identicon": "",
+    "colorHash": null,
+    "colorId": 0,
+    "keycard-pairing": "",
+    "key-uid": "0x1337beef3",
+    "images": [
+      {
+        "keyUid": "0x1337beef3",
+        "type": "large",
+        "uri": "data:image/png;base64,iVBORw0KGgoAAAANSUg=",
+        "width": 240,
+        "height": 300,
+        "fileSize": 1024,
+        "resizeTarget": 240,
+        "clock": 0
+      },
+      {
+        "keyUid": "0x1337beef3",
+        "type": "thumbnail",
+        "uri": "data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=",
+        "width": 80,
+        "height": 80,
+        "fileSize": 256,
+        "resizeTarget": 80,
+        "clock": 0
+      }
+    ],
+    "kdfIterations": 3200,
+    "hasAcceptedTerms": false
+  }
+]
+`
+
+	require.Exactly(t, removeAllWhitespace(expected), string(accJSON))
 }

 func TestDatabase_GetAccount(t *testing.T) {
 	db, stop := setupTestDB(t)
 	defer stop()

-	expected := Account{Name: "string", KeyUID: keyUID, ColorHash: ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}}, ColorID: 10, KDFIterations: dbsetup.ReducedKDFIterationsNumber}
+	expected := Account{
+		Name:             "string",
+		KeyUID:           keyUID,
+		ColorHash:        ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}},
+		ColorID:          10,
+		KDFIterations:    dbsetup.ReducedKDFIterationsNumber,
+		HasAcceptedTerms: true,
+	}
 	require.NoError(t, db.SaveAccount(expected))

 	account, err := db.GetAccount(expected.KeyUID)
diff --git a/multiaccounts/migrations/sql/1724407149_add_has_accepted_terms_to_accounts.up.sql b/multiaccounts/migrations/sql/1724407149_add_has_accepted_terms_to_accounts.up.sql
new file mode 100644
index 00000000000..d346f880297
--- /dev/null
+++ b/multiaccounts/migrations/sql/1724407149_add_has_accepted_terms_to_accounts.up.sql
@@ -0,0 +1 @@
+ALTER TABLE accounts ADD COLUMN hasAcceptedTerms BOOLEAN NOT NULL DEFAULT FALSE;
diff --git a/multiaccounts/settings/database.go b/multiaccounts/settings/database.go
index 708a09a120f..1d048a53abf 100644
--- a/multiaccounts/settings/database.go
+++ b/multiaccounts/settings/database.go
@@ -8,10 +8,9 @@ import (
 	"sync"
 	"time"

-	"github.com/ethereum/go-ethereum/log"
-
 	"github.com/status-im/status-go/common/dbsetup"
 	"github.com/status-im/status-go/eth-node/types"
+	"github.com/status-im/status-go/logutils"
 	"github.com/status-im/status-go/multiaccounts/errors"
 	"github.com/status-im/status-go/nodecfg"
 	"github.com/status-im/status-go/params"
@@ -836,7 +835,7 @@ func (db *Database) postChangesToSubscribers(change *SyncSettingField) {
 	select {
 	case s <- change:
 	default:
-		log.Warn("settings changes subscription channel full, dropping message")
+		logutils.ZapLogger().Warn("settings changes subscription channel full, dropping message")
 	}
 }
 }
diff --git a/nix/pkgs/codecov-cli/default.nix b/nix/pkgs/codecov-cli/default.nix
index 12fbf83e01b..b342a1561e2 100644
--- a/nix/pkgs/codecov-cli/default.nix
+++ b/nix/pkgs/codecov-cli/default.nix
@@ -16,6 +16,7 @@ in stdenv.mkDerivation rec {
     url = "https://cli.codecov.io/v${version}/${platform}/codecov";
     hash = lib.getAttr builtins.currentSystem {
       aarch64-darwin = "sha256-CB1D8/zYF23Jes9sd6rJiadDg7nwwee9xWSYqSByAlU=";
+      x86_64-darwin = "sha256-CB1D8/zYF23Jes9sd6rJiadDg7nwwee9xWSYqSByAlU=";
       x86_64-linux = "sha256-65AgCcuAD977zikcE1eVP4Dik4L0PHqYzOO1fStNjOw=";
       aarch64-linux = "sha256-hALtVSXY40uTIaAtwWr7EXh7zclhK63r7a341Tn+q/g=";
     };
diff --git a/nix/shell.nix b/nix/shell.nix
index 10f2647c77e..5a29bac6aa3 100644
--- a/nix/shell.nix
+++ b/nix/shell.nix
@@ -16,7 +16,12 @@ let
     inherit xcodeWrapper;
     withAndroidPkgs = !isMacM1;
   };
-in pkgs.mkShell {
+  /* Override the default SDK to enable darwin-x86_64 builds */
+  appleSdk11Stdenv = pkgs.overrideSDK pkgs.stdenv "11.0";
+  sdk11mkShell = pkgs.mkShell.override { stdenv = appleSdk11Stdenv; };
+  mkShell = if stdenv.isDarwin then sdk11mkShell else pkgs.mkShell;
+
+in mkShell {
   name = "status-go-shell";

   buildInputs = with pkgs; [
diff --git a/node/get_status_node.go b/node/get_status_node.go
index 0e298468403..0ec00dd8546 100644
--- a/node/get_status_node.go
+++ b/node/get_status_node.go
@@ -12,10 +12,10 @@ import (
 	"sync"

 	"github.com/syndtr/goleveldb/leveldb"
+	"go.uber.org/zap"

 	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -95,7 +95,7 @@ type StatusNode struct {
 	peerPool *peers.PeerPool
 	db       *leveldb.DB // used as a cache for PeerPool

-	log log.Logger
+	logger *zap.Logger

 	gethAccountManager *account.GethManager
 	accountsManager    *accounts.Manager
@@ -141,11 +141,12 @@ type StatusNode struct {
 }

 // New makes new instance of StatusNode.
-func New(transactor *transactions.Transactor) *StatusNode {
+func New(transactor *transactions.Transactor, logger *zap.Logger) *StatusNode {
+	logger = logger.Named("StatusNode")
 	return &StatusNode{
-		gethAccountManager: account.NewGethManager(),
+		gethAccountManager: account.NewGethManager(logger),
 		transactor:         transactor,
-		log:                log.New("package", "status-go/node.StatusNode"),
+		logger:             logger,
 		publicMethods:      make(map[string]bool),
 	}
 }
@@ -204,7 +205,7 @@ type StartOptions struct {
 // The server can only handle requests that don't require appdb or IPFS downloader
 func (n *StatusNode) StartMediaServerWithoutDB() error {
 	if n.isRunning() {
-		n.log.Debug("node is already running, no need to StartMediaServerWithoutDB")
+		n.logger.Debug("node is already running, no need to StartMediaServerWithoutDB")
 		return nil
 	}

@@ -235,13 +236,13 @@ func (n *StatusNode) StartWithOptions(config *params.NodeConfig, options StartOp
 	defer n.mu.Unlock()

 	if n.isRunning() {
-		n.log.Debug("node is already running")
+		n.logger.Debug("node is already running")
 		return ErrNodeRunning
 	}

 	n.accountsManager = options.AccountsManager

-	n.log.Debug("starting with options", "ClusterConfig", config.ClusterConfig)
+	n.logger.Debug("starting with options", zap.Stringer("ClusterConfig", &config.ClusterConfig))

 	db, err := db.Create(config.DataDir, params.StatusDatabase)
 	if err != nil {
@@ -259,7 +260,7 @@ func (n *StatusNode) StartWithOptions(config *params.NodeConfig, options StartOp

 	if err != nil {
 		if dberr := db.Close(); dberr != nil {
-			n.log.Error("error while closing leveldb after node crash", "error", dberr)
+			n.logger.Error("error while closing leveldb after node crash", zap.Error(dberr))
 		}
 		n.db = nil
 		return err
@@ -364,7 +365,7 @@ func (n *StatusNode) discoverNode() (*enode.Node, error) {
 		return discNode, nil
 	}

-	n.log.Info("Using AdvertiseAddr for rendezvous", "addr", n.config.AdvertiseAddr)
+	n.logger.Info("Using AdvertiseAddr for rendezvous", zap.String("addr", n.config.AdvertiseAddr))

 	r := discNode.Record()
 	r.Set(enr.IP(net.ParseIP(n.config.AdvertiseAddr)))
@@ -406,11 +407,10 @@ func (n *StatusNode) startDiscovery() error {
 	} else {
 		n.discovery = discoveries[0]
 	}
-	log.Debug(
-		"using discovery",
-		"instance", reflect.TypeOf(n.discovery),
-		"registerTopics", n.config.RegisterTopics,
-		"requireTopics", n.config.RequireTopics,
+	n.logger.Debug("using discovery",
+		zap.Any("instance", reflect.TypeOf(n.discovery)),
+		zap.Any("registerTopics", n.config.RegisterTopics),
+		zap.Any("requireTopics", n.config.RequireTopics),
 	)
 	n.register = peers.NewRegister(n.discovery, n.config.RegisterTopics...)
 	options := peers.NewDefaultOptions()
@@ -449,7 +449,7 @@ func (n *StatusNode) Stop() error {
 func (n *StatusNode) stop() error {
 	if n.isDiscoveryRunning() {
 		if err := n.stopDiscovery(); err != nil {
-			n.log.Error("Error stopping the discovery components", "error", err)
+			n.logger.Error("Error stopping the discovery components", zap.Error(err))
 		}
 		n.register = nil
 		n.peerPool = nil
@@ -478,7 +478,7 @@ func (n *StatusNode) stop() error {

 	if n.db != nil {
 		if err = n.db.Close(); err != nil {
-			n.log.Error("Error closing the leveldb of status node", "error", err)
+			n.logger.Error("Error closing the leveldb of status node", zap.Error(err))
 			return err
 		}
 		n.db = nil
@@ -509,7 +509,7 @@ func (n *StatusNode) stop() error {
 	n.publicMethods = make(map[string]bool)
 	n.pendingTracker = nil
 	n.appGeneralSrvc = nil
-	n.log.Debug("status node stopped")
+	n.logger.Debug("status node stopped")
 	return nil
 }

@@ -538,7 +538,7 @@ func (n *StatusNode) ResetChainData(config *params.NodeConfig) error {
 	}
 	err := os.RemoveAll(chainDataDir)
 	if err == nil {
-		n.log.Info("Chain data has been removed", "dir", chainDataDir)
+		n.logger.Info("Chain data has been removed", zap.String("dir", chainDataDir))
 	}
 	return err
 }
@@ -558,16 +558,16 @@ func (n *StatusNode) isRunning() bool {
 // populateStaticPeers connects current node with our publicly available LES/SHH/Swarm cluster
 func (n *StatusNode) populateStaticPeers() error {
 	if !n.config.ClusterConfig.Enabled {
-		n.log.Info("Static peers are disabled")
+		n.logger.Info("Static peers are disabled")
 		return nil
 	}

 	for _, enode := range n.config.ClusterConfig.StaticNodes {
 		if err := n.addPeer(enode); err != nil {
-			n.log.Error("Static peer addition failed", "error", err)
+			n.logger.Error("Static peer addition failed", zap.Error(err))
 			return err
 		}
-		n.log.Info("Static peer added", "enode", enode)
+		n.logger.Info("Static peer added", zap.String("enode", enode))
 	}

 	return nil
@@ -575,16 +575,16 @@ func (n *StatusNode) populateStaticPeers() error {

 func (n *StatusNode) removeStaticPeers() error {
 	if !n.config.ClusterConfig.Enabled {
-		n.log.Info("Static peers are disabled")
+		n.logger.Info("Static peers are disabled")
 		return nil
 	}

 	for _, enode := range n.config.ClusterConfig.StaticNodes {
 		if err := n.removePeer(enode); err != nil {
-			n.log.Error("Static peer deletion failed", "error", err)
+			n.logger.Error("Static peer deletion failed", zap.Error(err))
 			return err
 		}
-		n.log.Info("Static peer deleted", "enode", enode)
+		n.logger.Info("Static peer deleted", zap.String("enode", enode))
 	}
 	return nil
 }
diff --git a/node/geth_node.go b/node/geth_node.go
index ccdc57a07d1..7c56db5dde2 100644
--- a/node/geth_node.go
+++ b/node/geth_node.go
@@ -7,9 +7,9 @@ import (
 	"path/filepath"

 	"github.com/syndtr/goleveldb/leveldb"
+	"go.uber.org/zap"

 	"github.com/ethereum/go-ethereum/accounts"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
@@ -17,6 +17,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/nat"

 	"github.com/status-im/status-go/eth-node/crypto"
+	"github.com/status-im/status-go/logutils"
 	"github.com/status-im/status-go/params"
 )

@@ -33,7 +34,7 @@ var (
 )

 // All general log messages in this package should be routed through this logger.
-var logger = log.New("package", "status-go/node")
+var logger = logutils.ZapLogger().Named("node")

 // MakeNode creates a geth node entity
 func MakeNode(config *params.NodeConfig, accs *accounts.Manager, db *leveldb.DB) (*node.Node, error) {
@@ -146,7 +147,7 @@ func parseNodes(enodes []string) []*enode.Node {
 		if err == nil {
 			nodes = append(nodes, parsedPeer)
 		} else {
-			logger.Error("Failed to parse enode", "enode", item, "err", err)
+			logger.Error("Failed to parse enode", zap.String("enode", item), zap.Error(err))
 		}
 	}

@@ -162,7 +163,7 @@ func parseNodesV5(enodes []string) []*discv5.Node {
 		if err == nil {
 			nodes = append(nodes, parsedPeer)
 		} else {
-			logger.Error("Failed to parse enode", "enode", enode, "err", err)
+			logger.Error("Failed to parse enode", zap.String("enode", enode), zap.Error(err))
 		}
 	}
 	return nodes
diff --git a/node/geth_status_node_test.go b/node/geth_status_node_test.go
index 52a1bc3f19d..e401a25d05c 100644
--- a/node/geth_status_node_test.go
+++ b/node/geth_status_node_test.go
@@ -8,12 +8,15 @@ import (
 	"testing"
 	"time"

+	"go.uber.org/zap"
+
 	gethnode "github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/stretchr/testify/require"

 	"github.com/status-im/status-go/params"
+	"github.com/status-im/status-go/protocol/tt"
 	"github.com/status-im/status-go/t/helpers"
 	"github.com/status-im/status-go/t/utils"
 )
@@ -21,7 +24,7 @@ import (
 func TestStatusNodeStart(t *testing.T) {
 	config, err := utils.MakeTestNodeConfigWithDataDir("", "", params.StatusChainNetworkID)
 	require.NoError(t, err)
-	n := New(nil)
+	n := New(nil, tt.MustCreateTestLogger())

 	// checks before node is started
 	require.Nil(t, n.GethNode())
@@ -33,7 +36,7 @@ func TestStatusNodeStart(t *testing.T) {
 	defer func() {
 		err := stop()
 		if err != nil {
-			n.log.Error("stopping db", err)
+			n.logger.Error("stopping db", zap.Error(err))
 		}
 	}()
 	require.NoError(t, err)
@@ -83,13 +86,13 @@ func TestStatusNodeWithDataDir(t *testing.T) {
 	defer func() {
 		err := stop1()
 		if err != nil {
-			n.log.Error("stopping db", err)
+			n.logger.Error("stopping db", zap.Error(err))
 		}
 	}()
 	defer func() {
 		err := stop2()
 		if err != nil {
-			n.log.Error("stopping multiaccount db", err)
+			n.logger.Error("stopping multiaccount db", zap.Error(err))
 		}
 	}()
 	require.NoError(t, err)
@@ -118,13 +121,13 @@ func TestStatusNodeAddPeer(t *testing.T) {
 	defer func() {
 		err := stop1()
 		if err != nil {
-			n.log.Error("stopping db", err)
+			n.logger.Error("stopping db", zap.Error(err))
 		}
 	}()
 	defer func() {
 		err := stop2()
 		if err != nil {
-			n.log.Error("stopping multiaccount db", err)
+			n.logger.Error("stopping multiaccount db", zap.Error(err))
 		}
 	}()
 	require.NoError(t, err)
@@ -157,13 +160,13 @@ func TestStatusNodeDiscoverNode(t *testing.T) {
 	defer func() {
 		err := stop1()
 		if err != nil {
-			n.log.Error("stopping db", err)
+			n.logger.Error("stopping db", zap.Error(err))
 		}
 	}()
 	defer func() {
 		err := stop2()
 		if err != nil {
-			n.log.Error("stopping multiaccount db", err)
+			n.logger.Error("stopping multiaccount db", zap.Error(err))
 		}
 	}()
 	require.NoError(t, err)
@@ -183,13 +186,13 @@ func TestStatusNodeDiscoverNode(t *testing.T) {
 	defer func() {
 		err := stop11()
 		if err != nil {
-			n1.log.Error("stopping db", err)
+			n1.logger.Error("stopping db", zap.Error(err))
 		}
 	}()
 	defer func() {
 		err := stop12()
 		if err != nil {
-			n1.log.Error("stopping multiaccount db", err)
+			n1.logger.Error("stopping multiaccount db", zap.Error(err))
 		}
 	}()
 	require.NoError(t, err)
diff --git a/node/status_node_rpc_client_test.go b/node/status_node_rpc_client_test.go
index 5484a880fd4..9b6392ce4b7 100644
--- a/node/status_node_rpc_client_test.go
+++ b/node/status_node_rpc_client_test.go
@@ -10,10 +10,12 @@ import (
 	"testing"

 	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"

 	"github.com/status-im/status-go/appdatabase"
 	"github.com/status-im/status-go/multiaccounts"
 	"github.com/status-im/status-go/params"
+	"github.com/status-im/status-go/protocol/tt"
 	"github.com/status-im/status-go/t/helpers"
 	"github.com/status-im/status-go/walletdatabase"
 )
@@ -66,13 +68,13 @@ func setupTestMultiDB() (*multiaccounts.Database, func() error, error) {
 }

 func createAndStartStatusNode(config *params.NodeConfig) (*StatusNode, error) {
-	statusNode := New(nil)
+	statusNode := New(nil, tt.MustCreateTestLogger())
 	appDB, walletDB, stop, err := setupTestDBs()
 	defer func() {
 		err := stop()
 		if err != nil {
-			statusNode.log.Error("stopping db", err)
+			statusNode.logger.Error("stopping db", zap.Error(err))
 		}
 	}()
 	if err != nil {
@@ -85,7 +87,7 @@ func createAndStartStatusNode(config *params.NodeConfig) (*StatusNode, error) {
 	defer func() {
 		err := stop2()
 		if err != nil {
-			statusNode.log.Error("stopping multiaccount db", err)
+			statusNode.logger.Error("stopping multiaccount db", zap.Error(err))
 		}
 	}()
 	if err != nil {
@@ -106,7 +108,7 @@ func createStatusNode() (*StatusNode, func() error, func() error, error) {
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	statusNode := New(nil)
+	statusNode := New(nil, tt.MustCreateTestLogger())
 	statusNode.SetAppDB(appDB)
 	statusNode.SetWalletDB(walletDB)

diff --git a/node/status_node_services.go b/node/status_node_services.go
index 16539d21f77..48406c055c4 100644
--- a/node/status_node_services.go
+++ b/node/status_node_services.go
@@ -10,6 +10,8 @@ import (
 	"reflect"
 	"time"

+	"go.uber.org/zap"
+
 	"github.com/status-im/status-go/protocol/common/shard"
 	"github.com/status-im/status-go/server"
 	"github.com/status-im/status-go/signal"
@@ -657,7 +659,7 @@ func (b *StatusNode) StopLocalNotifications() error {
 	if b.localNotificationsSrvc.IsStarted() {
 		err := b.localNotificationsSrvc.Stop()
 		if err != nil {
-			b.log.Error("LocalNotifications service stop failed on StopLocalNotifications", "error", err)
+			b.logger.Error("LocalNotifications service stop failed on StopLocalNotifications", zap.Error(err))
 			return nil
 		}
 	}
@@ -678,7 +680,7 @@ func (b *StatusNode) StartLocalNotifications() error {

 	err := b.localNotificationsSrvc.Start()

 	if err != nil {
-		b.log.Error("LocalNotifications service start failed on StartLocalNotifications", "error", err)
+		b.logger.Error("LocalNotifications service start failed on StartLocalNotifications", zap.Error(err))
 		return nil
 	}
 }
@@ -686,7 +688,7 @@ func (b *StatusNode) StartLocalNotifications() error {

 	err := b.localNotificationsSrvc.SubscribeWallet(&b.walletFeed)

 	if err != nil {
-		b.log.Error("LocalNotifications service could not subscribe to wallet on StartLocalNotifications", "error", err)
+		b.logger.Error("LocalNotifications service could not subscribe to wallet on StartLocalNotifications", zap.Error(err))
 		return nil
 	}

diff --git a/params/config.go b/params/config.go
index bfa937c0ce3..98b51d6526c 100644
--- a/params/config.go
+++ b/params/config.go
@@ -11,15 +11,16 @@ import (
 	"strings"
 	"time"

+	"go.uber.org/zap"
 	validator "gopkg.in/go-playground/validator.v9"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/params"

 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/eth-node/types"
+	"github.com/status-im/status-go/logutils"
 	"github.com/status-im/status-go/static"
 	wakucommon "github.com/status-im/status-go/waku/common"
 	wakuv2common "github.com/status-im/status-go/wakuv2/common"
@@ -409,8 +410,6 @@ type NodeConfig struct {
 	// handshake phase, counted separately for inbound and outbound connections.
 	MaxPendingPeers int

-	log log.Logger
-
 	// LogEnabled enables the logger
 	LogEnabled bool `json:"LogEnabled"`

@@ -807,7 +806,7 @@ func (c *NodeConfig) setDefaultPushNotificationsServers() error {

 	// If empty load defaults from the fleet
 	if len(c.ClusterConfig.PushNotificationsServers) == 0 {
-		log.Debug("empty push notification servers, setting", "fleet", c.ClusterConfig.Fleet)
+		logutils.ZapLogger().Debug("empty push notification servers, setting", zap.String("fleet", c.ClusterConfig.Fleet))
 		defaultConfig := &NodeConfig{}
 		err := loadConfigFromAsset(fmt.Sprintf("../config/cli/fleet-%s.json", c.ClusterConfig.Fleet), defaultConfig)
 		if err != nil {
@@ -818,7 +817,7 @@ func (c *NodeConfig) setDefaultPushNotificationsServers() error {

 	// If empty set the default servers
 	if len(c.ShhextConfig.DefaultPushNotificationsServers) == 0 {
-		log.Debug("setting default push notification servers", "cluster servers", c.ClusterConfig.PushNotificationsServers)
+		logutils.ZapLogger().Debug("setting default push notification servers", zap.Strings("cluster servers", c.ClusterConfig.PushNotificationsServers))
 		for _, pk := range c.ClusterConfig.PushNotificationsServers {
 			keyBytes, err := hex.DecodeString("04" + pk)
 			if err != nil {
@@ -929,7 +928,6 @@ func NewNodeConfig(dataDir string, networkID uint64) (*NodeConfig, error) {
 		MaxPeers:        25,
 		MaxPendingPeers: 0,
 		IPCFile:         "geth.ipc",
-		log:             log.New("package", "status-go/params.NodeConfig"),
 		LogFile:         "",
 		LogLevel:        "ERROR",
 		NoDiscovery:     true,
@@ -1159,7 +1157,6 @@ func (c *NodeConfig) Save() error {
 		return err
 	}

-	c.log.Info("config file saved", "path", configFilePath)
 	return nil
 }
diff --git a/peers/cache.go b/peers/cache.go
index f99e8071928..280df84826a 100644
--- a/peers/cache.go
+++ b/peers/cache.go
@@ -3,12 +3,13 @@ package peers
 import (
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/util"
+	"go.uber.org/zap"

-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/enode"

 	"github.com/status-im/status-go/db"
+	"github.com/status-im/status-go/logutils"
 )

 // NewCache returns instance of PeersDatabase
@@ -55,7 +56,7 @@ func (d *Cache) GetPeersRange(topic discv5.Topic, limit int) (nodes []*discv5.No
 		node := discv5.Node{}
 		value := iterator.Value()
 		if err := node.UnmarshalText(value); err != nil {
-			log.Error("can't unmarshal node", "value", value, "error", err)
+			logutils.ZapLogger().Error("can't unmarshal node", zap.Binary("value", value), zap.Error(err))
 			continue
 		}
 		nodes = append(nodes, &node)
diff --git a/peers/peerpool.go b/peers/peerpool.go
index 5af3cfc72f1..bf0b48f9d68 100644
--- a/peers/peerpool.go
+++ b/peers/peerpool.go
@@ -6,14 +6,16 @@ import (
 	"sync"
 	"time"

+	"go.uber.org/zap"
+
 	"github.com/ethereum/go-ethereum/event"
-	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/enode"

 	"github.com/status-im/status-go/common"
 	"github.com/status-im/status-go/discovery"
+	"github.com/status-im/status-go/logutils"
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/peers/verifier"
 	"github.com/status-im/status-go/signal"
@@ -205,7 +207,7 @@ func (p *PeerPool) stopDiscovery(server *p2p.Server) {
 	}

 	if err := p.discovery.Stop(); err != nil {
-		log.Error("discovery errored when stopping", "err", err)
+		logutils.ZapLogger().Error("discovery errored when stopping", zap.Error(err))
 	}
 	for _, t := range p.topics {
 		t.StopSearch(server)
@@ -224,7 +226,7 @@ func (p *PeerPool) restartDiscovery(server *p2p.Server) error {
 		if err := p.startDiscovery(); err != nil {
 			return err
 		}
-		log.Debug("restarted discovery from peer pool")
+		logutils.ZapLogger().Debug("restarted discovery from peer pool")
 	}
 	for _, t := range p.topics {
 		if !t.BelowMin() || t.SearchRunning() {
@@ -232,7 +234,7 @@ func (p *PeerPool) restartDiscovery(server *p2p.Server) error {
 		}
 		err := t.StartSearch(server)
 		if err != nil {
-			log.Error("search failed to start", "error", err)
+			logutils.ZapLogger().Error("search failed to start", zap.Error(err))
 		}
 	}
 	return nil
@@ -283,15 +285,15 @@ func (p *PeerPool) handleServerPeers(server *p2p.Server, events <-chan *p2p.Peer

 		select {
 		case <-p.quit:
-			log.Debug("stopping DiscV5 because of quit")
+			logutils.ZapLogger().Debug("stopping DiscV5 because of quit")
 			p.stopDiscovery(server)
 			return
 		case <-timeout:
-			log.Info("DiscV5 timed out")
+			logutils.ZapLogger().Info("DiscV5 timed out")
 			p.stopDiscovery(server)
 		case <-retryDiscv5:
 			if err := p.restartDiscovery(server); err != nil {
-				log.Error("starting discv5 failed", "error", err, "retry", discoveryRestartTimeout)
+				logutils.ZapLogger().Error("starting discv5 failed", zap.Duration("retry", discoveryRestartTimeout), zap.Error(err))
 				queueRetry(discoveryRestartTimeout)
 			}
 		case <-stopDiscv5:
@@ -320,12 +322,12 @@ func (p *PeerPool) handlePeerEventType(server *p2p.Server, event *p2p.PeerEvent,
 	var shouldStop bool
 	switch event.Type {
 	case p2p.PeerEventTypeDrop:
-		log.Debug("confirm peer dropped", "ID", event.Peer)
+		logutils.ZapLogger().Debug("confirm peer dropped", zap.Stringer("ID", event.Peer))
 		if p.handleDroppedPeer(server, event.Peer) {
 			shouldRetry = true
 		}
 	case p2p.PeerEventTypeAdd: // skip other events
-		log.Debug("confirm peer added", "ID", event.Peer)
+		logutils.ZapLogger().Debug("confirm peer added", zap.Stringer("ID", event.Peer))
 		p.handleAddedPeer(server, event.Peer)
 		shouldStop = true
 	default:
@@ -366,7 +368,7 @@ func (p *PeerPool) handleStopTopics(server *p2p.Server) {
 		}
 	}
 	if p.allTopicsStopped() {
-		log.Debug("closing discv5 connection because all topics reached max limit")
+		logutils.ZapLogger().Debug("closing discv5 connection because all topics reached max limit")
 		p.stopDiscovery(server)
 	}
 }
@@ -393,10 +395,10 @@ func (p *PeerPool) handleDroppedPeer(server *p2p.Server, nodeID enode.ID) (any b
 		if confirmed {
 			newPeer := t.AddPeerFromTable(server)
 			if newPeer != nil {
-				log.Debug("added peer from local table", "ID", newPeer.ID)
+				logutils.ZapLogger().Debug("added peer from local table", zap.Stringer("ID", newPeer.ID))
 			}
 		}
-		log.Debug("search", "topic", t.Topic(), "below min", t.BelowMin())
+		logutils.ZapLogger().Debug("search", zap.String("topic", string(t.Topic())), zap.Bool("below min", t.BelowMin()))
 		if t.BelowMin() && !t.SearchRunning() {
 			any = true
 		}
@@ -415,7 +417,7 @@ func (p *PeerPool) Stop() {
 	case <-p.quit:
 		return
 	default:
-		log.Debug("started closing peer pool")
+		logutils.ZapLogger().Debug("started closing peer pool")
 		close(p.quit)
 	}
 	p.serverSubscription.Unsubscribe()
diff --git a/peers/topic_register.go b/peers/topic_register.go
index 2bd326ef783..47389804b6d 100644
--- a/peers/topic_register.go
+++ b/peers/topic_register.go
@@ -3,11 +3,13 @@ package peers
 import (
 	"sync"

-	"github.com/ethereum/go-ethereum/log"
+	"go.uber.org/zap"
+
 	"github.com/ethereum/go-ethereum/p2p/discv5"

 	"github.com/status-im/status-go/common"
 	"github.com/status-im/status-go/discovery"
+	"github.com/status-im/status-go/logutils"
 )

 // Register manages register topic queries
@@ -34,9 +36,9 @@ func (r *Register) Start() error {
 		r.wg.Add(1)
 		go func(t discv5.Topic) {
 			defer common.LogOnPanic()
-			log.Debug("v5 register topic", "topic", t)
+			logutils.ZapLogger().Debug("v5 register topic", zap.String("topic", string(t)))
 			if err := r.discovery.Register(string(t), r.quit); err != nil {
-				log.Error("error registering topic", "topic", t, "error", err)
+				logutils.ZapLogger().Error("error registering topic", zap.String("topic", string(t)), zap.Error(err))
 			}
 			r.wg.Done()
 		}(topic)
@@ -55,6 +57,6 @@ func (r *Register) Stop() {
 	default:
 		close(r.quit)
 	}
-	log.Debug("waiting for register queries to exit")
+	logutils.ZapLogger().Debug("waiting for register queries to exit")
 	r.wg.Wait()
 }
diff --git a/peers/topicpool.go b/peers/topicpool.go
index a7f008035d7..69dad1394f4 100644
--- a/peers/topicpool.go
+++ b/peers/topicpool.go
@@ -6,13 +6,15 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/ethereum/go-ethereum/log"
+	"go.uber.org/zap"
+
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discv5"
 	"github.com/ethereum/go-ethereum/p2p/enode"

 	"github.com/status-im/status-go/common"
 	"github.com/status-im/status-go/discovery"
+	"github.com/status-im/status-go/logutils"
 	"github.com/status-im/status-go/params"
 )

@@ -315,7 +317,7 @@ func (t *TopicPool) ConfirmAdded(server *p2p.Server, nodeID enode.ID) {
 	peerInfoItem, ok := t.pendingPeers[nodeID]
 	inbound := !ok || !peerInfoItem.added

-	log.Debug("peer added event", "peer", nodeID.String(), "inbound", inbound)
+	logutils.ZapLogger().Debug("peer added event", zap.Stringer("peer", nodeID), zap.Bool("inbound", inbound))

 	if inbound {
 		return
@@ -326,13 +328,13 @@ func (t *TopicPool) ConfirmAdded(server *p2p.Server, nodeID enode.ID) {
 	// established connection means that the node
 	// is a viable candidate for a connection and can be cached
 	if err := t.cache.AddPeer(peer.node, t.topic); err != nil {
-		log.Error("failed to persist a peer", "error", err)
+		logutils.ZapLogger().Error("failed to persist a peer", zap.Error(err))
 	}

 	t.movePeerFromPoolToConnected(nodeID)
 	// if the upper limit is already reached, drop this peer
 	if len(t.connectedPeers) > t.limits.Max {
-		log.Debug("max limit is reached drop the peer", "ID", nodeID, "topic", t.topic)
+		logutils.ZapLogger().Debug("max limit is reached drop the peer", zap.Stringer("ID", nodeID), zap.String("topic", string(t.topic)))
 		peer.dismissed = true
 		t.removeServerPeer(server, peer)
 		return
@@ -364,7 +366,7 @@ func (t *TopicPool) ConfirmDropped(server *p2p.Server, nodeID enode.ID) bool {
 		return false
 	}

-	log.Debug("disconnect", "ID", nodeID, "dismissed", peer.dismissed)
+	logutils.ZapLogger().Debug("disconnect", zap.Stringer("ID", nodeID), zap.Bool("dismissed", peer.dismissed))

 	delete(t.connectedPeers, nodeID)
 	// Peer was removed by us because exceeded the limit.
@@ -382,7 +384,7 @@ func (t *TopicPool) ConfirmDropped(server *p2p.Server, nodeID enode.ID) bool {
 	t.removeServerPeer(server, peer)

 	if err := t.cache.RemovePeer(nodeID, t.topic); err != nil {
-		log.Error("failed to remove peer from cache", "error", err)
+		logutils.ZapLogger().Error("failed to remove peer from cache", zap.Error(err))
 	}

 	// As we removed a peer, update a sync strategy if needed.
@@ -437,7 +439,7 @@ func (t *TopicPool) StartSearch(server *p2p.Server) error { lookup := make(chan bool, 10) // sufficiently buffered channel, just prevents blocking because of lookup for _, peer := range t.cache.GetPeersRange(t.topic, 5) { - log.Debug("adding a peer from cache", "peer", peer) + logutils.ZapLogger().Debug("adding a peer from cache", zap.Stringer("peer", peer)) found <- peer } @@ -445,7 +447,7 @@ go func() { defer common.LogOnPanic() if err := t.discovery.Discover(string(t.topic), t.period, found, lookup); err != nil { - log.Error("error searching foro", "topic", t.topic, "err", err) + logutils.ZapLogger().Error("error searching for topic", zap.String("topic", string(t.topic)), zap.Error(err)) } t.discWG.Done() }() @@ -471,7 +473,7 @@ func (t *TopicPool) handleFoundPeers(server *p2p.Server, found <-chan *discv5.No continue } if err := t.processFoundNode(server, node); err != nil { - log.Error("failed to process found node", "node", node, "error", err) + logutils.ZapLogger().Error("failed to process found node", zap.Stringer("node", node), zap.Error(err)) } } } @@ -493,7 +495,7 @@ func (t *TopicPool) processFoundNode(server *p2p.Server, node *discv5.Node) erro nodeID := enode.PubkeyToIDV4(pk) - log.Debug("peer found", "ID", nodeID, "topic", t.topic) + logutils.ZapLogger().Debug("peer found", zap.Stringer("ID", nodeID), zap.String("topic", string(t.topic))) // peer is already connected so update only discoveredTime if peer, ok := t.connectedPeers[nodeID]; ok { @@ -510,9 +512,9 @@ func (t *TopicPool) processFoundNode(server *p2p.Server, node *discv5.Node) erro publicKey: pk, }) } - log.Debug( - "adding peer to a server", "peer", node.ID.String(), - "connected", len(t.connectedPeers), "max", t.maxCachedPeers) + logutils.ZapLogger().Debug( + "adding peer to a server", zap.Stringer("peer", node.ID), + zap.Int("connected", len(t.connectedPeers)), zap.Int("max", t.maxCachedPeers)) // This can happen when the monotonic clock is not precise enough and // multiple peers gets added at the same clock time, resulting in all @@ -525,7 +527,7 @@ func (t *TopicPool) processFoundNode(server *p2p.Server, node *discv5.Node) erro // This has been reported on windows builds // only https://github.com/status-im/nim-status-client/issues/522 if t.pendingPeers[nodeID] == nil { - log.Debug("peer added has just been removed", "peer", nodeID) + logutils.ZapLogger().Debug("peer added has just been removed", zap.Stringer("peer", nodeID)) return nil } @@ -570,7 +572,7 @@ func (t *TopicPool) StopSearch(server *p2p.Server) { return default: } - log.Debug("stoping search", "topic", t.topic) + logutils.ZapLogger().Debug("stopping search", zap.String("topic", string(t.topic))) close(t.quit) t.mu.Lock() if t.fastModeTimeoutCancel != nil { diff --git a/profiling/profiler.go b/profiling/profiler.go index a8018e8382f..0820d04331d 100644 --- a/profiling/profiler.go +++ b/profiling/profiler.go @@ -6,8 +6,10 @@ import ( hpprof "net/http/pprof" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) // Profiler runs and controls a HTTP pprof interface.
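All of these call sites reach the logger through logutils.ZapLogger() instead of a logger threaded through constructors. The accessor itself is not shown in this diff; the following is only a plausible sketch of such a lazily initialized, process-wide logger:

package logutils

import (
	"sync"

	"go.uber.org/zap"
)

var (
	once   sync.Once
	global *zap.Logger
)

// ZapLogger returns the shared logger, creating it on first use so that
// leaf packages can log without any dependency injection.
func ZapLogger() *zap.Logger {
	once.Do(func() {
		logger, err := zap.NewDevelopment()
		if err != nil {
			logger = zap.NewNop() // fall back to a no-op logger rather than panic
		}
		global = logger
	})
	return global
}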
@@ -38,7 +40,7 @@ func NewProfiler(port int) *Profiler { func (p *Profiler) Go() { go func() { defer common.LogOnPanic() - log.Info("debug server stopped", "err", p.server.ListenAndServe()) + logutils.ZapLogger().Info("debug server stopped", zap.Error(p.server.ListenAndServe())) }() - log.Info("debug server started") + logutils.ZapLogger().Info("debug server started") } diff --git a/protocol/discord/assets.go b/protocol/discord/assets.go index 19e59ae2aa8..b42f3cacfed 100644 --- a/protocol/discord/assets.go +++ b/protocol/discord/assets.go @@ -5,8 +5,10 @@ import ( "net/http" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/status-im/status-go/images" + "github.com/status-im/status-go/logutils" ) func DownloadAvatarAsset(url string) ([]byte, error) { @@ -26,7 +28,7 @@ func DownloadAsset(url string) ([]byte, string, error) { } defer func() { if err := res.Body.Close(); err != nil { - log.Error("failed to close message asset http request body", "err", err) + logutils.ZapLogger().Error("failed to close message asset http request body", zap.Error(err)) } }() diff --git a/protocol/encryption/publisher/publisher.go b/protocol/encryption/publisher/publisher.go index e7b58707499..5c450cf69a2 100644 --- a/protocol/encryption/publisher/publisher.go +++ b/protocol/encryption/publisher/publisher.go @@ -73,6 +73,7 @@ func (p *Publisher) Stop() { } func (p *Publisher) tickerLoop() { + defer gocommon.LogOnPanic() ticker := time.NewTicker(tickerInterval * time.Second) go func() { diff --git a/protocol/messenger.go b/protocol/messenger.go index d48dca99885..7d9c455f8c0 100644 --- a/protocol/messenger.go +++ b/protocol/messenger.go @@ -581,7 +581,7 @@ func NewMessenger( if c.wakuService != nil { c.wakuService.SetStatusTelemetryClient(telemetryClient) } - go telemetryClient.Start(ctx) + telemetryClient.Start(ctx) } messenger = &Messenger{ @@ -916,7 +916,7 @@ func (m *Messenger) Start() (*MessengerResponse, error) { for _, c := range controlledCommunities { if c.Joined() && c.HasTokenPermissions() { - go m.communitiesManager.StartMembersReevaluationLoop(c.ID(), false) + m.communitiesManager.StartMembersReevaluationLoop(c.ID(), false) } } diff --git a/protocol/messenger_contact_verification.go b/protocol/messenger_contact_verification.go index b0d3328b69e..af6c6c8fa3d 100644 --- a/protocol/messenger_contact_verification.go +++ b/protocol/messenger_contact_verification.go @@ -24,6 +24,10 @@ import ( const minContactVerificationMessageLen = 1 const maxContactVerificationMessageLen = 280 +var ( + ErrContactNotMutual = errors.New("must be a mutual contact") +) + func (m *Messenger) SendContactVerificationRequest(ctx context.Context, contactID string, challenge string) (*MessengerResponse, error) { if len(challenge) < minContactVerificationMessageLen || len(challenge) > maxContactVerificationMessageLen { return nil, errors.New("invalid verification request challenge length") @@ -31,7 +35,7 @@ func (m *Messenger) SendContactVerificationRequest(ctx context.Context, contactI contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } verifRequest := &verification.Request{ @@ -138,7 +142,7 @@ func (m *Messenger) SendContactVerificationRequest(ctx context.Context, contactI func (m *Messenger) GetVerificationRequestSentTo(ctx context.Context, contactID string) (*verification.Request, error) { _, ok := m.allContacts.Load(contactID) if !ok { - return nil, errors.New("contact not found") + 
return nil, ErrContactNotFound } return m.verificationDatabase.GetLatestVerificationRequestSentTo(contactID) @@ -279,7 +283,7 @@ func (m *Messenger) AcceptContactVerificationRequest(ctx context.Context, id str contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } chat, ok := m.allChats.Load(contactID) @@ -394,7 +398,7 @@ func (m *Messenger) VerifiedTrusted(ctx context.Context, request *requests.Verif contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } err = m.setTrustStatusForContact(context.Background(), contactID, verification.TrustStatusTRUSTED) @@ -589,7 +593,7 @@ func (m *Messenger) DeclineContactVerificationRequest(ctx context.Context, id st contact, ok := m.allContacts.Load(verifRequest.From) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } contactID := verifRequest.From contact, err = m.setContactVerificationStatus(contactID, VerificationStatusVERIFIED) @@ -686,7 +690,7 @@ func (m *Messenger) DeclineContactVerificationRequest(ctx context.Context, id st func (m *Messenger) setContactVerificationStatus(contactID string, verificationStatus VerificationStatus) (*Contact, error) { contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } contact.VerificationStatus = verificationStatus @@ -714,6 +718,11 @@ func (m *Messenger) setContactVerificationStatus(contactID string, verificationS } func (m *Messenger) setTrustStatusForContact(ctx context.Context, contactID string, trustStatus verification.TrustStatus) error { + contact, ok := m.allContacts.Load(contactID) + if !ok { + return ErrContactNotFound + } + currentTime := m.getTimesource().GetCurrentTime() err := m.verificationDatabase.SetTrustStatus(contactID, trustStatus, currentTime) @@ -721,6 +730,9 @@ func (m *Messenger) setTrustStatusForContact(ctx context.Context, contactID stri return err } + contact.TrustStatus = trustStatus + m.allContacts.Store(contactID, contact) + return m.SyncTrustedUser(ctx, contactID, trustStatus, m.dispatchMessage) } @@ -784,7 +796,7 @@ func (m *Messenger) HandleRequestContactVerification(state *ReceivedMessageState contact := state.CurrentMessageState.Contact if !contact.mutual() { m.logger.Debug("Received a verification request for a non added mutual contact", zap.String("contactID", contactID)) - return errors.New("must be a mutual contact") + return ErrContactNotMutual } persistedVR, err := m.verificationDatabase.GetVerificationRequest(id) @@ -875,7 +887,7 @@ func (m *Messenger) HandleAcceptContactVerification(state *ReceivedMessageState, contact := state.CurrentMessageState.Contact if !contact.mutual() { m.logger.Debug("Received a verification response for a non mutual contact", zap.String("contactID", contactID)) - return errors.New("must be a mutual contact") + return ErrContactNotMutual } persistedVR, err := m.verificationDatabase.GetVerificationRequest(request.Id) @@ -964,7 +976,7 @@ func (m *Messenger) HandleDeclineContactVerification(state *ReceivedMessageState contact := state.CurrentMessageState.Contact if !contact.mutual() { m.logger.Debug("Received a verification decline for a non mutual contact", zap.String("contactID", contactID)) - return errors.New("must be a mutual contact") + return 
ErrContactNotMutual } persistedVR, err := m.verificationDatabase.GetVerificationRequest(request.Id) diff --git a/protocol/messenger_contact_verification_test.go b/protocol/messenger_contact_verification_test.go index ee46692bbeb..8ff0ff69613 100644 --- a/protocol/messenger_contact_verification_test.go +++ b/protocol/messenger_contact_verification_test.go @@ -769,3 +769,50 @@ func (s *MessengerVerificationRequests) newMessenger(shh types.Waku) *Messenger s.Require().NoError(err) return messenger } + +func (s *MessengerVerificationRequests) TestTrustStatus() { + theirMessenger := s.newMessenger(s.shh) + defer TearDownMessenger(&s.Suite, theirMessenger) + + s.mutualContact(theirMessenger) + + theirPk := types.EncodeHex(crypto.FromECDSAPub(&theirMessenger.identity.PublicKey)) + + // Test Mark as Trusted + err := s.m.MarkAsTrusted(context.Background(), theirPk) + s.Require().NoError(err) + + contactFromCache, ok := s.m.allContacts.Load(theirPk) + s.Require().True(ok) + s.Require().Equal(verification.TrustStatusTRUSTED, contactFromCache.TrustStatus) + trustStatusFromDb, err := s.m.GetTrustStatus(theirPk) + s.Require().NoError(err) + s.Require().Equal(verification.TrustStatusTRUSTED, trustStatusFromDb) + + // Test Remove Trust Mark + err = s.m.RemoveTrustStatus(context.Background(), theirPk) + s.Require().NoError(err) + + contactFromCache, ok = s.m.allContacts.Load(theirPk) + s.Require().True(ok) + s.Require().Equal(verification.TrustStatusUNKNOWN, contactFromCache.TrustStatus) + trustStatusFromDb, err = s.m.GetTrustStatus(theirPk) + s.Require().NoError(err) + s.Require().Equal(verification.TrustStatusUNKNOWN, trustStatusFromDb) + + // Test Mark as Untrustworthy + err = s.m.MarkAsUntrustworthy(context.Background(), theirPk) + s.Require().NoError(err) + + contactFromCache, ok = s.m.allContacts.Load(theirPk) + s.Require().True(ok) + s.Require().Equal(verification.TrustStatusUNTRUSTWORTHY, contactFromCache.TrustStatus) + trustStatusFromDb, err = s.m.GetTrustStatus(theirPk) + s.Require().NoError(err) + s.Require().Equal(verification.TrustStatusUNTRUSTWORTHY, trustStatusFromDb) + + // Test calling with an unknown contact + err = s.m.MarkAsTrusted(context.Background(), "0x00000123") + s.Require().Error(err) + s.Require().Equal("contact not found", err.Error()) +} diff --git a/protocol/messenger_contacts.go b/protocol/messenger_contacts.go index 011758f8d1d..f9b15ddc2bc 100644 --- a/protocol/messenger_contacts.go +++ b/protocol/messenger_contacts.go @@ -9,11 +9,10 @@ import ( "github.com/golang/protobuf/proto" "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" - "github.com/status-im/status-go/deprecation" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" multiaccountscommon "github.com/status-im/status-go/multiaccounts/common" "github.com/status-im/status-go/protocol/common" "github.com/status-im/status-go/protocol/protobuf" @@ -1337,7 +1336,7 @@ func (m *Messenger) publishSelfContactSubscriptions(event *SelfContactChangeEven select { case s <- event: default: - log.Warn("self contact subscription channel full, dropping message") + logutils.ZapLogger().Warn("self contact subscription channel full, dropping message") } } } diff --git a/protocol/messenger_store_node_request_manager.go b/protocol/messenger_store_node_request_manager.go index 84401003f2f..f0791b2bc3f 100644 --- a/protocol/messenger_store_node_request_manager.go +++ b/protocol/messenger_store_node_request_manager.go @@ -500,6 +500,8 @@ func (r
*storeNodeRequest) shouldFetchNextPage(envelopesCount int) (bool, uint32 } func (r *storeNodeRequest) routine() { + defer gocommon.LogOnPanic() + r.manager.logger.Info("starting store node request", zap.Any("requestID", r.requestID), zap.String("pubsubTopic", r.pubsubTopic), diff --git a/protocol/persistence.go b/protocol/persistence.go index 818a0442421..49543c9b4b3 100644 --- a/protocol/persistence.go +++ b/protocol/persistence.go @@ -10,14 +10,14 @@ import ( "time" "github.com/pkg/errors" - - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" "github.com/mat/besticon/besticon" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/images" userimage "github.com/status-im/status-go/images" + "github.com/status-im/status-go/logutils" multiaccountscommon "github.com/status-im/status-go/multiaccounts/common" "github.com/status-im/status-go/protocol/common" @@ -1323,7 +1323,7 @@ func (db *sqlitePersistence) AddBookmark(bookmark browsers.Bookmark) (browsers.B bookmark.ImageURL = icons[0].URL } } else { - log.Error("error getting the bookmark icon", "iconError", iconError) + logutils.ZapLogger().Error("error getting the bookmark icon", zap.Error(iconError)) } _, err = insert.Exec(bookmark.URL, bookmark.Name, bookmark.ImageURL, bookmark.Removed, bookmark.Clock) diff --git a/protocol/pushnotificationserver/server.go b/protocol/pushnotificationserver/server.go index 8761d676894..5b48a301293 100644 --- a/protocol/pushnotificationserver/server.go +++ b/protocol/pushnotificationserver/server.go @@ -19,7 +19,7 @@ import ( ) const encryptedPayloadKeyLength = 16 -const defaultGorushURL = "https://gorush.status.im" +const defaultGorushURL = "https://gorush.infra.status.im/" var errUnhandledPushNotificationType = errors.New("unhandled push notification type") diff --git a/protocol/requests/create_account.go b/protocol/requests/create_account.go index d1aa4555c07..45a0e87281b 100644 --- a/protocol/requests/create_account.go +++ b/protocol/requests/create_account.go @@ -66,7 +66,7 @@ type CreateAccount struct { // If you want to use non-default network, use NetworkID. 
CurrentNetwork string `json:"currentNetwork"` NetworkID *uint64 `json:"networkId"` - TestOverrideNetworks []params.Network `json:"-"` // This is used for testing purposes only + TestOverrideNetworks []params.Network `json:"networksOverride"` // This is used for testing purposes only TestNetworksEnabled bool `json:"testNetworksEnabled"` diff --git a/protocol/requests/create_community_request.go b/protocol/requests/create_community_request.go index 4ddc4f7e1c7..e5a782a45a1 100644 --- a/protocol/requests/create_community_request.go +++ b/protocol/requests/create_community_request.go @@ -3,8 +3,10 @@ package requests import ( "errors" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/status-im/status-go/images" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/protocol/protobuf" ) @@ -96,7 +98,7 @@ func (c *CreateCommunity) ToCommunityDescription() (*protobuf.CommunityDescripti if c.Image != "" || c.Banner.ImagePath != "" { ciis := make(map[string]*protobuf.IdentityImage) if c.Image != "" { - log.Info("has-image", "image", c.Image) + logutils.ZapLogger().Debug("has-image", zap.String("image", c.Image)) imgs, err := images.GenerateIdentityImages(c.Image, c.ImageAx, c.ImageAy, c.ImageBx, c.ImageBy) if err != nil { return nil, err @@ -106,7 +108,7 @@ func (c *CreateCommunity) ToCommunityDescription() (*protobuf.CommunityDescripti } } if c.Banner.ImagePath != "" { - log.Info("has-banner", "image", c.Banner.ImagePath) + logutils.ZapLogger().Debug("has-banner", zap.String("image", c.Banner.ImagePath)) img, err := images.GenerateBannerImage(c.Banner.ImagePath, c.Banner.X, c.Banner.Y, c.Banner.X+c.Banner.Width, c.Banner.Y+c.Banner.Height) if err != nil { return nil, err @@ -114,7 +116,7 @@ func (c *CreateCommunity) ToCommunityDescription() (*protobuf.CommunityDescripti ciis[img.Name] = adaptIdentityImageToProtobuf(*img) } ci.Images = ciis - log.Info("set images", "images", ci) + logutils.ZapLogger().Debug("set images", zap.Any("images", ci)) } description := &protobuf.CommunityDescription{ diff --git a/protocol/v1/status_message.go b/protocol/v1/status_message.go index 1c5ff96a584..634360a3984 100644 --- a/protocol/v1/status_message.go +++ b/protocol/v1/status_message.go @@ -7,12 +7,13 @@ import ( "github.com/golang/protobuf/proto" "github.com/jinzhu/copier" "github.com/pkg/errors" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" utils "github.com/status-im/status-go/common" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/protocol/encryption" "github.com/status-im/status-go/protocol/encryption/multidevice" "github.com/status-im/status-go/protocol/encryption/sharedsecret" @@ -166,7 +167,10 @@ func (m *StatusMessage) HandleApplicationLayer() error { m.ApplicationLayer.SigPubKey = recoveredKey // Calculate ID using the wrapped record m.ApplicationLayer.ID = MessageID(recoveredKey, m.EncryptionLayer.Payload) - log.Debug("calculated ID for envelope", "envelopeHash", hexutil.Encode(m.TransportLayer.Hash), "messageId", hexutil.Encode(m.ApplicationLayer.ID)) + logutils.ZapLogger().Debug("calculated ID for envelope", + zap.String("envelopeHash", hexutil.Encode(m.TransportLayer.Hash)), + zap.String("messageId", hexutil.Encode(m.ApplicationLayer.ID)), + ) m.ApplicationLayer.Payload = message.Payload m.ApplicationLayer.Type = message.Type diff --git a/rpc/call_raw.go 
b/rpc/call_raw.go index 42873334eae..d4e377c536f 100644 --- a/rpc/call_raw.go +++ b/rpc/call_raw.go @@ -4,6 +4,8 @@ import ( "context" "encoding/json" + "go.uber.org/zap" + gethrpc "github.com/ethereum/go-ethereum/rpc" ) @@ -101,7 +103,7 @@ func (c *Client) callBatchMethods(ctx context.Context, msgs json.RawMessage) str data, err := json.Marshal(responses) if err != nil { - c.log.Error("Failed to marshal batch responses:", "error", err) + c.logger.Error("Failed to marshal batch responses:", zap.Error(err)) return newErrorResponse(errInvalidMessageCode, err, defaultMsgID) } diff --git a/rpc/chain/client.go b/rpc/chain/client.go index 226c5aa73bb..ba06bd0bd90 100644 --- a/rpc/chain/client.go +++ b/rpc/chain/client.go @@ -11,17 +11,19 @@ import ( "sync/atomic" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/status-im/status-go/circuitbreaker" "github.com/status-im/status-go/healthmanager" "github.com/status-im/status-go/healthmanager/rpcstatus" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc/chain/ethclient" "github.com/status-im/status-go/rpc/chain/rpclimiter" "github.com/status-im/status-go/rpc/chain/tagger" @@ -805,10 +807,10 @@ func (c *ClientWithFallback) toggleConnectionState(err error) { connected := true if err != nil { if !isNotFoundError(err) && !isVMError(err) && !errors.Is(err, rpclimiter.ErrRequestsOverLimit) && !errors.Is(err, context.Canceled) { - log.Warn("Error not in chain call", "error", err, "chain", c.ChainID) + logutils.ZapLogger().Warn("Error not in chain call", zap.Uint64("chain", c.ChainID), zap.Error(err)) connected = false } else { - log.Warn("Error in chain call", "error", err) + logutils.ZapLogger().Warn("Error in chain call", zap.Error(err)) } } c.SetIsConnected(connected) diff --git a/rpc/chain/rpclimiter/rpc_limiter.go b/rpc/chain/rpclimiter/rpc_limiter.go index e33b10b0004..8c297098343 100644 --- a/rpc/chain/rpclimiter/rpc_limiter.go +++ b/rpc/chain/rpclimiter/rpc_limiter.go @@ -8,9 +8,10 @@ import ( "time" "github.com/google/uuid" + "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) const ( @@ -132,7 +133,7 @@ func NewRequestLimiter(storage LimitsStorage) *RPCRequestLimiter { func (rl *RPCRequestLimiter) SetLimit(tag string, maxRequests int, interval time.Duration) error { err := rl.saveToStorage(tag, maxRequests, interval, 0, time.Now()) if err != nil { - log.Error("Failed to save request data to storage", "error", err) + logutils.ZapLogger().Error("Failed to save request data to storage", zap.Error(err)) return err } @@ -151,7 +152,7 @@ func (rl *RPCRequestLimiter) GetLimit(tag string) (*LimitData, error) { func (rl *RPCRequestLimiter) DeleteLimit(tag string) error { err := rl.storage.Delete(tag) if err != nil { - log.Error("Failed to delete request data from storage", "error", err) + logutils.ZapLogger().Error("Failed to delete request data from storage", zap.Error(err)) return err } @@ -169,7 +170,7 @@ func (rl *RPCRequestLimiter) saveToStorage(tag string, maxRequests int, interval err := rl.storage.Set(data) if err != nil { - log.Error("Failed to save request data to storage", "error", err) + 
logutils.ZapLogger().Error("Failed to save request data to storage", zap.Error(err)) return err } @@ -202,12 +203,12 @@ func (rl *RPCRequestLimiter) Allow(tag string) (bool, error) { // Check if a number of requests is over the limit within the interval if time.Since(data.CreatedAt) < data.Period || data.Period.Milliseconds() == LimitInfinitely { if data.NumReqs >= data.MaxReqs { - log.Info("Number of requests over limit", - "tag", tag, - "numReqs", data.NumReqs, - "maxReqs", data.MaxReqs, - "period", data.Period, - "createdAt", data.CreatedAt.UTC()) + logutils.ZapLogger().Info("Number of requests over limit", + zap.String("tag", tag), + zap.Int("numReqs", data.NumReqs), + zap.Int("maxReqs", data.MaxReqs), + zap.Duration("period", data.Period), + zap.Time("createdAt", data.CreatedAt.UTC())) return false, ErrRequestsOverLimit } diff --git a/rpc/client.go b/rpc/client.go index b492a84b440..a1d66cb5195 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -16,12 +16,14 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + gethrpc "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/event" appCommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/healthmanager" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/rpc/chain" "github.com/status-im/status-go/rpc/chain/ethclient" @@ -107,7 +109,7 @@ type Client struct { handlersMx sync.RWMutex // mx guards handlers handlers map[string]Handler // locally registered handlers - log log.Logger + logger *zap.Logger walletNotifier func(chainID uint64, message string) providerConfigs []params.ProviderConfig @@ -133,7 +135,7 @@ type ClientConfig struct { func NewClient(config ClientConfig) (*Client, error) { var err error - log := log.New("package", "status-go/rpc.Client") + logger := logutils.ZapLogger().Named("rpcClient") networkManager := network.NewManager(config.DB) if networkManager == nil { return nil, errors.New("failed to create network manager") @@ -141,7 +143,7 @@ func NewClient(config ClientConfig) (*Client, error) { err = networkManager.Init(config.Networks) if err != nil { - log.Error("Network manager failed to initialize", "error", err) + logger.Error("Network manager failed to initialize", zap.Error(err)) } c := Client{ @@ -150,7 +152,7 @@ func NewClient(config ClientConfig) (*Client, error) { handlers: make(map[string]Handler), rpcClients: make(map[uint64]chain.ClientInterface), limiterPerProvider: make(map[string]*rpclimiter.RPCRpsLimiter), - log: log, + logger: logger, providerConfigs: config.ProviderConfigs, healthMgr: healthmanager.NewBlockchainHealthManager(), walletFeed: config.WalletFeed, @@ -168,7 +170,7 @@ func NewClient(config ClientConfig) (*Client, error) { func (c *Client) Start(ctx context.Context) { if c.stopMonitoringFunc != nil { - c.log.Warn("Blockchain health manager already started") + c.logger.Warn("Blockchain health manager already started") return } @@ -188,18 +190,19 @@ func (c *Client) Stop() { } func (c *Client) monitorHealth(ctx context.Context, statusCh chan struct{}) { + defer appCommon.LogOnPanic() sendFullStatusEventFunc := func() { blockchainStatus := c.healthMgr.GetFullStatus() encodedMessage, err := json.Marshal(blockchainStatus) if err != nil { - c.log.Warn("could not marshal full blockchain status", "error", err) + c.logger.Warn("could not marshal full blockchain status", zap.Error(err)) return } if c.walletFeed == nil { return } // FIXME: remove these excessive logs in 
future release (2.31+) - c.log.Debug("Sending blockchain health status event", "status", string(encodedMessage)) + c.logger.Debug("Sending blockchain health status event", zap.String("status", string(encodedMessage))) c.walletFeed.Send(walletevent.Event{ Type: EventBlockchainHealthChanged, Message: string(encodedMessage), @@ -302,7 +305,7 @@ func (c *Client) getEthClients(network *params.Network) []ethclient.RPSLimitedEt rpcClient, err = gethrpc.DialOptions(context.Background(), provider.URL, opts...) if err != nil { - c.log.Error("dial server "+provider.Key, "error", err) + c.logger.Error("dial server "+provider.Key, zap.Error(err)) } // If using the status-proxy, consider each endpoint as a separate provider @@ -317,7 +320,7 @@ func (c *Client) getEthClients(network *params.Network) []ethclient.RPSLimitedEt rpcLimiter, err = c.getRPCRpsLimiter(circuitKey) if err != nil { - c.log.Error("get RPC limiter "+provider.Key, "error", err) + c.logger.Error("get RPC limiter "+provider.Key, zap.Error(err)) } ethClients = append(ethClients, ethclient.NewRPSLimitedEthClient(rpcClient, rpcLimiter, circuitKey)) @@ -422,7 +425,7 @@ func (c *Client) CallContextIgnoringLocalHandlers(ctx context.Context, result in } if c.local == nil { - c.log.Warn("Local JSON-RPC endpoint missing", "method", method) + c.logger.Warn("Local JSON-RPC endpoint missing", zap.String("method", method)) return errors.New("missing local JSON-RPC endpoint") } return c.local.CallContext(ctx, result, method, args...) diff --git a/rpc/provider.go b/rpc/provider.go index 35a0701bb57..329388570cd 100644 --- a/rpc/provider.go +++ b/rpc/provider.go @@ -5,6 +5,8 @@ import ( "sort" "strings" + "go.uber.org/zap" + "github.com/status-im/status-go/params" ) @@ -72,7 +74,7 @@ func (c *Client) prepareProviders(network *params.Network) []Provider { // Retrieve the proxy provider configuration proxyProvider, err := getProviderConfig(c.providerConfigs, ProviderStatusProxy) if err != nil { - c.log.Warn("could not find provider config for status-proxy", "error", err) + c.logger.Warn("could not find provider config for status-proxy", zap.Error(err)) } // Add main and fallback providers diff --git a/server/certs.go b/server/certs.go index 4a50deec85a..eb8dd475b81 100644 --- a/server/certs.go +++ b/server/certs.go @@ -12,7 +12,7 @@ import ( "net" "time" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) var globalMediaCertificate *tls.Certificate = nil @@ -83,7 +83,11 @@ func generateMediaTLSCert() error { now := time.Now() notBefore := now.Add(-365 * 24 * time.Hour * 100) notAfter := now.Add(365 * 24 * time.Hour * 100) - log.Debug("generate media cert", "system time", time.Now().String(), "cert notBefore", notBefore.String(), "cert notAfter", notAfter.String()) + logutils.ZapLogger().Debug("generate media cert", + logutils.UnixTimeMs("system time", time.Now()), + logutils.UnixTimeMs("cert notBefore", notBefore), + logutils.UnixTimeMs("cert notAfter", notAfter), + ) finalCert, certPem, err := GenerateTLSCert(notBefore, notAfter, []net.IP{}, []string{Localhost}) if err != nil { return err diff --git a/server/pairing/preflight/preflight.go b/server/pairing/preflight/preflight.go index 2b9db1303f8..dcac97736ac 100644 --- a/server/pairing/preflight/preflight.go +++ b/server/pairing/preflight/preflight.go @@ -11,8 +11,6 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" - "github.com/status-im/status-go/server/pairing" "github.com/status-im/status-go/timesource" @@ -40,7 +38,10 @@ func preflightHandler(w 
http.ResponseWriter, r *http.Request) { func makeCert(address net.IP) (*tls.Certificate, []byte, error) { now := timesource.GetCurrentTime() - log.Debug("makeCert", "system time", time.Now().String(), "timesource time", now.String()) + logutils.ZapLogger().Debug("makeCert", + logutils.UnixTimeMs("system time", time.Now()), + logutils.UnixTimeMs("timesource time", now), + ) notBefore := now.Add(-pairing.CertificateMaxClockDrift) notAfter := now.Add(pairing.CertificateMaxClockDrift) return server.GenerateTLSCert(notBefore, notAfter, []net.IP{address}, []string{}) diff --git a/server/pairing/server.go b/server/pairing/server.go index eccf5a276a5..def85819a66 100644 --- a/server/pairing/server.go +++ b/server/pairing/server.go @@ -9,8 +9,6 @@ import ( "runtime" "time" - "github.com/ethereum/go-ethereum/log" - "go.uber.org/zap" "github.com/status-im/status-go/timesource" @@ -80,7 +78,10 @@ func MakeServerConfig(config *ServerConfig) error { } now := timesource.GetCurrentTime() - log.Debug("pairing server generate cert", "system time", time.Now().String(), "timesource time", now.String()) + logutils.ZapLogger().Debug("pairing server generate cert", + logutils.UnixTimeMs("system time", time.Now()), + logutils.UnixTimeMs("timesource time", now), + ) tlsCert, _, err := GenerateCertFromKey(tlsKey, now, ips, []string{}) if err != nil { return err diff --git a/server/pairing/sync_device_test.go b/server/pairing/sync_device_test.go index 9598c9969d0..14ce6bffd89 100644 --- a/server/pairing/sync_device_test.go +++ b/server/pairing/sync_device_test.go @@ -107,7 +107,7 @@ func (s *SyncDeviceSuite) prepareBackendWithAccount(mnemonic, tmpdir string) *ap } func (s *SyncDeviceSuite) prepareBackendWithoutAccount(tmpdir string) *api.GethStatusBackend { - backend := api.NewGethStatusBackend() + backend := api.NewGethStatusBackend(s.logger) backend.UpdateRootDataDir(tmpdir) return backend } diff --git a/services/accounts/accounts.go b/services/accounts/accounts.go index bb9f1d86cbb..e2cf86e604e 100644 --- a/services/accounts/accounts.go +++ b/services/accounts/accounts.go @@ -8,9 +8,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/account" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" walletsettings "github.com/status-im/status-go/multiaccounts/settings_wallet" "github.com/status-im/status-go/params" @@ -39,7 +39,7 @@ type DerivedAddress struct { } func (api *API) SaveAccount(ctx context.Context, account *accounts.Account) error { - log.Info("[AccountsAPI::SaveAccount]") + logutils.ZapLogger().Info("[AccountsAPI::SaveAccount]") err := (*api.messenger).SaveOrUpdateAccount(account) if err != nil { return err @@ -54,7 +54,7 @@ func (api *API) SaveAccount(ctx context.Context, account *accounts.Account) erro // Setting `Keypair` without `Accounts` will update keypair only, `Keycards` won't be saved/updated this way. 
func (api *API) SaveKeypair(ctx context.Context, keypair *accounts.Keypair) error { - log.Info("[AccountsAPI::SaveKeypair]") + logutils.ZapLogger().Info("[AccountsAPI::SaveKeypair]") err := (*api.messenger).SaveOrUpdateKeypair(keypair) if err != nil { return err diff --git a/services/accounts/accountsevent/watcher.go b/services/accounts/accountsevent/watcher.go index b10e4e4750e..496542da11a 100644 --- a/services/accounts/accountsevent/watcher.go +++ b/services/accounts/accountsevent/watcher.go @@ -3,9 +3,11 @@ package accountsevent import ( "context" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/services/wallet/async" ) @@ -51,7 +53,7 @@ func onAccountsChange(accountsDB *accounts.Database, callback AccountsChangeCb, currentEthAddresses, err := accountsDB.GetWalletAddresses() if err != nil { - log.Error("failed getting wallet addresses", "error", err) + logutils.ZapLogger().Error("failed getting wallet addresses", zap.Error(err)) return } @@ -76,7 +78,7 @@ func watch(ctx context.Context, accountsDB *accounts.Database, accountFeed *even return nil case err := <-sub.Err(): if err != nil { - log.Error("accounts watcher subscription failed", "error", err) + logutils.ZapLogger().Error("accounts watcher subscription failed", zap.Error(err)) } case ev := <-ch: onAccountsChange(accountsDB, callback, ev.Accounts, ev.Type) diff --git a/services/accounts/settingsevent/watcher.go b/services/accounts/settingsevent/watcher.go index 9bc99fb4db6..9477fea2213 100644 --- a/services/accounts/settingsevent/watcher.go +++ b/services/accounts/settingsevent/watcher.go @@ -3,8 +3,10 @@ package settingsevent import ( "context" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/settings" "github.com/status-im/status-go/services/wallet/async" ) @@ -61,7 +63,7 @@ func watch(ctx context.Context, feed *event.Feed, callback SettingChangeCb) erro return nil case err := <-sub.Err(): if err != nil { - log.Error("settings watcher subscription failed", "error", err) + logutils.ZapLogger().Error("settings watcher subscription failed", zap.Error(err)) } case ev := <-ch: if ev.Type == EventTypeChanged { diff --git a/services/appmetrics/api.go b/services/appmetrics/api.go index f294736b97f..7dcf59a4ad0 100644 --- a/services/appmetrics/api.go +++ b/services/appmetrics/api.go @@ -5,8 +5,8 @@ import ( "github.com/pborman/uuid" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/appmetrics" + "github.com/status-im/status-go/logutils" ) func NewAPI(db *appmetrics.Database) *API { @@ -19,16 +19,16 @@ type API struct { } func (api *API) ValidateAppMetrics(ctx context.Context, appMetrics []appmetrics.AppMetric) error { - log.Debug("[AppMetricsAPI::ValidateAppMetrics]") + logutils.ZapLogger().Debug("[AppMetricsAPI::ValidateAppMetrics]") return api.db.ValidateAppMetrics(appMetrics) } func (api *API) SaveAppMetrics(ctx context.Context, appMetrics []appmetrics.AppMetric) error { - log.Debug("[AppMetricsAPI::SaveAppMetrics]") + logutils.ZapLogger().Debug("[AppMetricsAPI::SaveAppMetrics]") return api.db.SaveAppMetrics(appMetrics, api.sessionID) } func (api *API) GetAppMetrics(ctx context.Context, limit int, offset int) (appmetrics.Page, error) { - 
log.Debug("[AppMetricsAPI::GetAppMetrics]") + logutils.ZapLogger().Debug("[AppMetricsAPI::GetAppMetrics]") return api.db.GetAppMetrics(limit, offset) } diff --git a/services/browsers/api.go b/services/browsers/api.go index d880287d99d..fcfad1d2889 100644 --- a/services/browsers/api.go +++ b/services/browsers/api.go @@ -3,7 +3,9 @@ package browsers import ( "context" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + + "github.com/status-im/status-go/logutils" ) func NewAPI(db *Database) *API { @@ -16,36 +18,36 @@ type API struct { } func (api *API) GetBookmarks(ctx context.Context) ([]*Bookmark, error) { - log.Debug("call to get bookmarks") + logutils.ZapLogger().Debug("call to get bookmarks") rst, err := api.db.GetBookmarks() - log.Debug("result from database for bookmarks", "len", len(rst)) + logutils.ZapLogger().Debug("result from database for bookmarks", zap.Int("len", len(rst))) return rst, err } func (api *API) StoreBookmark(ctx context.Context, bookmark Bookmark) (Bookmark, error) { - log.Debug("call to create a bookmark") + logutils.ZapLogger().Debug("call to create a bookmark") bookmarkResult, err := api.db.StoreBookmark(bookmark) - log.Debug("result from database for creating a bookmark", "err", err) + logutils.ZapLogger().Debug("result from database for creating a bookmark", zap.Error(err)) return bookmarkResult, err } func (api *API) UpdateBookmark(ctx context.Context, originalURL string, bookmark Bookmark) error { - log.Debug("call to update a bookmark") + logutils.ZapLogger().Debug("call to update a bookmark") err := api.db.UpdateBookmark(originalURL, bookmark) - log.Debug("result from database for updating a bookmark", "err", err) + logutils.ZapLogger().Debug("result from database for updating a bookmark", zap.Error(err)) return err } func (api *API) DeleteBookmark(ctx context.Context, url string) error { - log.Debug("call to remove a bookmark") + logutils.ZapLogger().Debug("call to remove a bookmark") err := api.db.DeleteBookmark(url) - log.Debug("result from database for remove a bookmark", "err", err) + logutils.ZapLogger().Debug("result from database for remove a bookmark", zap.Error(err)) return err } func (api *API) RemoveBookmark(ctx context.Context, url string) error { - log.Debug("call to remove a bookmark logically") + logutils.ZapLogger().Debug("call to remove a bookmark logically") err := api.db.RemoveBookmark(url) - log.Debug("result from database for remove a bookmark logically", "err", err) + logutils.ZapLogger().Debug("result from database for remove a bookmark logically", zap.Error(err)) return err } diff --git a/services/browsers/database.go b/services/browsers/database.go index ddd213cf5f2..104922d2b77 100644 --- a/services/browsers/database.go +++ b/services/browsers/database.go @@ -5,8 +5,9 @@ import ( "database/sql" "github.com/mat/besticon/besticon" + "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) // Database sql wrapper for operations with browser objects. 
@@ -82,7 +83,7 @@ func (db *Database) StoreBookmark(bookmark Bookmark) (Bookmark, error) { bookmark.ImageURL = icons[0].URL } } else { - log.Error("error getting the bookmark icon", "iconError", iconError) + logutils.ZapLogger().Error("error getting the bookmark icon", zap.Error(iconError)) } _, err = insert.Exec(bookmark.URL, bookmark.Name, bookmark.ImageURL, bookmark.Removed, bookmark.Clock, bookmark.DeletedAt) diff --git a/services/chat/api.go b/services/chat/api.go index 20d3abd9834..22cf0d351c0 100644 --- a/services/chat/api.go +++ b/services/chat/api.go @@ -4,8 +4,6 @@ import ( "context" "errors" - "github.com/ethereum/go-ethereum/log" - "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/images" "github.com/status-im/status-go/protocol" @@ -19,14 +17,12 @@ var ( func NewAPI(service *Service) *API { return &API{ - s: service, - log: log.New("package", "status-go/services/chat.API"), + s: service, } } type API struct { - s *Service - log log.Logger + s *Service } func (api *API) EditChat(ctx context.Context, communityID types.HexBytes, chatID string, name string, color string, image images.CroppedImage) (*protocol.MessengerResponse, error) { diff --git a/services/communitytokens/api.go b/services/communitytokens/api.go index 9bbee3ac9dd..1eb73adb14b 100644 --- a/services/communitytokens/api.go +++ b/services/communitytokens/api.go @@ -6,11 +6,11 @@ import ( "math/big" "github.com/pkg/errors" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/contracts/community-tokens/assets" "github.com/status-im/status-go/contracts/community-tokens/collectibles" communitytokendeployer "github.com/status-im/status-go/contracts/community-tokens/deployer" @@ -19,6 +19,7 @@ import ( "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/images" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/protocol/communities/token" "github.com/status-im/status-go/protocol/protobuf" "github.com/status-im/status-go/services/utils" @@ -109,7 +110,7 @@ func (api *API) DeployCollectibles(ctx context.Context, chainID uint64, deployme ethClient, err := api.s.manager.rpcClient.EthClient(chainID) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return DeploymentDetails{}, err } address, tx, _, err := collectibles.DeployCollectibles(transactOpts, ethClient, deploymentParameters.Name, @@ -118,7 +119,7 @@ func (api *API) DeployCollectibles(ctx context.Context, chainID uint64, deployme deploymentParameters.TokenURI, common.HexToAddress(deploymentParameters.OwnerTokenAddress), common.HexToAddress(deploymentParameters.MasterTokenAddress)) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return DeploymentDetails{}, err } @@ -132,7 +133,7 @@ func (api *API) DeployCollectibles(ctx context.Context, chainID uint64, deployme "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return DeploymentDetails{}, err } @@ -223,16 +224,16 @@ func (api *API) DeployOwnerToken(ctx context.Context, chainID uint64, return DeploymentDetails{}, err } - log.Debug("Signature:", communitySignature) + logutils.ZapLogger().Debug("Prepare deployment", zap.Any("signature", communitySignature)) tx, err := 
deployerContractInst.Deploy(transactOpts, ownerTokenConfig, masterTokenConfig, communitySignature, common.FromHex(signerPubKey)) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return DeploymentDetails{}, err } - log.Debug("Contract deployed hash:", tx.Hash().String()) + logutils.ZapLogger().Debug("Contract deployed", zap.Stringer("hash", tx.Hash())) err = api.s.pendingTracker.TrackPendingTransaction( wcommon.ChainID(chainID), @@ -244,7 +245,7 @@ func (api *API) DeployOwnerToken(ctx context.Context, chainID uint64, "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return DeploymentDetails{}, err } @@ -282,7 +283,7 @@ func (api *API) DeployAssets(ctx context.Context, chainID uint64, deploymentPara ethClient, err := api.s.manager.rpcClient.EthClient(chainID) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return DeploymentDetails{}, err } @@ -293,7 +294,7 @@ func (api *API) DeployAssets(ctx context.Context, chainID uint64, deploymentPara common.HexToAddress(deploymentParameters.OwnerTokenAddress), common.HexToAddress(deploymentParameters.MasterTokenAddress)) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return DeploymentDetails{}, err } @@ -307,7 +308,7 @@ func (api *API) DeployAssets(ctx context.Context, chainID uint64, deploymentPara "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return DeploymentDetails{}, err } @@ -404,7 +405,7 @@ func (api *API) MintTokens(ctx context.Context, chainID uint64, contractAddress "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return "", err } @@ -471,7 +472,7 @@ func (api *API) RemoteBurn(ctx context.Context, chainID uint64, contractAddress additionalData, ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return "", err } @@ -523,7 +524,7 @@ func (api *API) Burn(ctx context.Context, chainID uint64, contractAddress string "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return "", err } diff --git a/services/communitytokens/estimations.go b/services/communitytokens/estimations.go index 91d133421a6..bb7fd0ef6a5 100644 --- a/services/communitytokens/estimations.go +++ b/services/communitytokens/estimations.go @@ -6,16 +6,18 @@ import ( "math/big" "strings" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/status-im/status-go/contracts/community-tokens/assets" "github.com/status-im/status-go/contracts/community-tokens/collectibles" communitytokendeployer "github.com/status-im/status-go/contracts/community-tokens/deployer" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/protocol/protobuf" "github.com/status-im/status-go/services/wallet/bigint" "github.com/status-im/status-go/services/wallet/router/fees" @@ -159,7 
+161,7 @@ func (s *Service) deployOwnerTokenGasUnits(ctx context.Context, chainID uint64, communityID string, signerPubKey string) (uint64, error) { ethClient, err := s.manager.rpcClient.EthClient(chainID) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return 0, err } @@ -217,14 +219,14 @@ func (s *Service) deployOwnerTokenGasUnits(ctx context.Context, chainID uint64, } finalEstimation := estimate + uint64(float32(estimate)*0.1) - log.Debug("Owner token deployment gas estimation: ", finalEstimation) + logutils.ZapLogger().Debug("Owner token deployment estimation", zap.Uint64("gas", finalEstimation)) return finalEstimation, nil } func (s *Service) deployCollectiblesGasUnits(ctx context.Context, chainID uint64, fromAddress string) (uint64, error) { ethClient, err := s.manager.rpcClient.EthClient(chainID) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return 0, err } @@ -252,14 +254,14 @@ func (s *Service) deployCollectiblesGasUnits(ctx context.Context, chainID uint64 } finalEstimation := estimate + uint64(float32(estimate)*0.1) - log.Debug("Collectibles deployment gas estimation: ", finalEstimation) + logutils.ZapLogger().Debug("Collectibles deployment estimation", zap.Uint64("gas", finalEstimation)) return finalEstimation, nil } func (s *Service) deployAssetsGasUnits(ctx context.Context, chainID uint64, fromAddress string) (uint64, error) { ethClient, err := s.manager.rpcClient.EthClient(chainID) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return 0, err } @@ -287,7 +289,7 @@ func (s *Service) deployAssetsGasUnits(ctx context.Context, chainID uint64, from } finalEstimation := estimate + uint64(float32(estimate)*0.1) - log.Debug("Assets deployment gas estimation: ", finalEstimation) + logutils.ZapLogger().Debug("Assets deployment estimation", zap.Uint64("gas", finalEstimation)) return finalEstimation, nil } @@ -404,7 +406,7 @@ func (s *Service) estimateL1Fee(ctx context.Context, chainID uint64, sendArgs tr func (s *Service) estimateMethodForTokenInstance(ctx context.Context, contractInstance TokenInstance, chainID uint64, contractAddress string, fromAddress string, methodName string, args ...interface{}) (uint64, error) { ethClient, err := s.manager.rpcClient.EthClient(chainID) if err != nil { - log.Error(err.Error()) + logutils.ZapLogger().Error(err.Error()) return 0, err } diff --git a/services/communitytokens/service.go b/services/communitytokens/service.go index bea446d0d4c..a1a8930d354 100644 --- a/services/communitytokens/service.go +++ b/services/communitytokens/service.go @@ -9,11 +9,11 @@ import ( "strings" "github.com/pkg/errors" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" ethRpc "github.com/ethereum/go-ethereum/rpc" "github.com/status-im/status-go/account" @@ -22,6 +22,7 @@ import ( communityownertokenregistry "github.com/status-im/status-go/contracts/community-tokens/registry" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/protocol" "github.com/status-im/status-go/protocol/communities" @@ -98,7 +99,7 @@ func (s *Service) handleWalletEvent(event walletevent.Event) { var p transactions.StatusChangedPayload err :=
json.Unmarshal([]byte(event.Message), &p) if err != nil { - log.Error(errors.Wrap(err, fmt.Sprintf("can't parse transaction message %v\n", event.Message)).Error()) + logutils.ZapLogger().Error(errors.Wrap(err, fmt.Sprintf("can't parse transaction message %v\n", event.Message)).Error()) return } if p.Status == transactions.Pending { @@ -106,7 +107,7 @@ func (s *Service) handleWalletEvent(event walletevent.Event) { } pendingTransaction, err := s.pendingTracker.GetPendingEntry(p.ChainID, p.Hash) if err != nil { - log.Error(errors.Wrap(err, fmt.Sprintf("no pending transaction with hash %v on chain %v\n", p.Hash, p.ChainID)).Error()) + logutils.ZapLogger().Error(errors.Wrap(err, fmt.Sprintf("no pending transaction with hash %v on chain %v\n", p.Hash, p.ChainID)).Error()) return } @@ -131,7 +132,7 @@ func (s *Service) handleWalletEvent(event walletevent.Event) { err = s.pendingTracker.Delete(context.Background(), p.ChainID, p.Hash) if err != nil { - log.Error(errors.Wrap(err, fmt.Sprintf("can't delete pending transaction with hash %v on chain %v\n", p.Hash, p.ChainID)).Error()) + logutils.ZapLogger().Error(errors.Wrap(err, fmt.Sprintf("can't delete pending transaction with hash %v on chain %v\n", p.Hash, p.ChainID)).Error()) } errorStr := "" @@ -152,7 +153,7 @@ func (s *Service) handleAirdropCommunityToken(status string, pendingTransaction publishErr := s.publishTokenActionToPrivilegedMembers(communityToken.CommunityID, uint64(communityToken.ChainID), communityToken.Address, protobuf.CommunityTokenAction_AIRDROP) if publishErr != nil { - log.Warn("can't publish airdrop action") + logutils.ZapLogger().Warn("can't publish airdrop action") } } return communityToken, err @@ -166,7 +167,7 @@ func (s *Service) handleRemoteDestructCollectible(status string, pendingTransact publishErr := s.publishTokenActionToPrivilegedMembers(communityToken.CommunityID, uint64(communityToken.ChainID), communityToken.Address, protobuf.CommunityTokenAction_REMOTE_DESTRUCT) if publishErr != nil { - log.Warn("can't publish remote destruct action") + logutils.ZapLogger().Warn("can't publish remote destruct action") } } return communityToken, err @@ -193,7 +194,7 @@ func (s *Service) handleBurnCommunityToken(status string, pendingTransaction *tr publishErr := s.publishTokenActionToPrivilegedMembers(communityToken.CommunityID, uint64(communityToken.ChainID), communityToken.Address, protobuf.CommunityTokenAction_BURN) if publishErr != nil { - log.Warn("can't publish burn action") + logutils.ZapLogger().Warn("can't publish burn action") } } return communityToken, err @@ -528,7 +529,7 @@ func (s *Service) SetSignerPubKey(ctx context.Context, chainID uint64, contractA "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return "", err } @@ -723,9 +724,9 @@ func (s *Service) ReTrackOwnerTokenDeploymentTransaction(ctx context.Context, ch transactions.Keep, "", ) - log.Debug("retracking pending transaction with hashId ", hashString) + logutils.ZapLogger().Debug("retracking pending transaction", zap.String("hashId", hashString)) } else { - log.Debug("pending transaction with hashId is already tracked ", hashString) + logutils.ZapLogger().Debug("pending transaction already tracked", zap.String("hashId", hashString)) } return err } diff --git a/services/ens/api.go b/services/ens/api.go index f4067f1a31b..eae63a56077 100644 --- a/services/ens/api.go +++ b/services/ens/api.go @@ -18,19 +18,20 @@ import ( "github.com/pkg/errors" 
"github.com/wealdtech/go-ens/v3" "github.com/wealdtech/go-multicodec" + "go.uber.org/zap" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/account" gocommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/contracts" "github.com/status-im/status-go/contracts/registrar" "github.com/status-im/status-go/contracts/resolver" "github.com/status-im/status-go/contracts/snt" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/services/utils" @@ -247,7 +248,7 @@ func (api *API) AddressOf(ctx context.Context, chainID uint64, username string) } func (api *API) usernameRegistrarAddr(ctx context.Context, chainID uint64) (common.Address, error) { - log.Info("obtaining username registrar address") + logutils.ZapLogger().Info("obtaining username registrar address") api.addrPerChainMutex.Lock() defer api.addrPerChainMutex.Unlock() addr, ok := api.addrPerChain[chainID] @@ -279,12 +280,12 @@ func (api *API) usernameRegistrarAddr(ctx context.Context, chainID uint64) (comm for { select { case <-api.quit: - log.Info("quitting ens contract subscription") + logutils.ZapLogger().Info("quitting ens contract subscription") sub.Unsubscribe() return case err := <-sub.Err(): if err != nil { - log.Error("ens contract subscription error: " + err.Error()) + logutils.ZapLogger().Error("ens contract subscription error: " + err.Error()) } return case vLog := <-logs: @@ -365,14 +366,14 @@ func (api *API) Release(ctx context.Context, chainID uint64, txArgs transactions "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return "", err } err = api.Remove(ctx, chainID, fullDomainName(username)) if err != nil { - log.Warn("Releasing ENS username: transaction successful, but removing failed") + logutils.ZapLogger().Warn("Releasing ENS username: transaction successful, but removing failed") } return tx.Hash().String(), nil @@ -494,13 +495,13 @@ func (api *API) Register(ctx context.Context, chainID uint64, txArgs transaction "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return "", err } err = api.Add(ctx, chainID, fullDomainName(username)) if err != nil { - log.Warn("Registering ENS username: transaction successful, but adding failed") + logutils.ZapLogger().Warn("Registering ENS username: transaction successful, but adding failed") } return tx.Hash().String(), nil @@ -612,14 +613,14 @@ func (api *API) SetPubKey(ctx context.Context, chainID uint64, txArgs transactio "", ) if err != nil { - log.Error("TrackPendingTransaction error", "error", err) + logutils.ZapLogger().Error("TrackPendingTransaction error", zap.Error(err)) return "", err } err = api.Add(ctx, chainID, fullDomainName(username)) if err != nil { - log.Warn("Registering ENS username: transaction successful, but adding failed") + logutils.ZapLogger().Warn("Registering ENS username: transaction successful, but adding failed") } return tx.Hash().String(), nil diff --git a/services/ext/api.go b/services/ext/api.go index d7918fb4dd0..d82e77a53ca 100644 --- a/services/ext/api.go +++ 
b/services/ext/api.go @@ -11,14 +11,15 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/multiformats/go-multiaddr" + "go.uber.org/zap" "github.com/status-im/status-go/account" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/browsers" "github.com/status-im/status-go/services/wallet" "github.com/status-im/status-go/services/wallet/bigint" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" @@ -154,7 +155,7 @@ type MessagesResponse struct { type PublicAPI struct { service *Service eventSub mailservers.EnvelopeEventSubscriber - log log.Logger + logger *zap.Logger } // NewPublicAPI returns instance of the public API. @@ -162,7 +163,7 @@ func NewPublicAPI(s *Service, eventSub mailservers.EnvelopeEventSubscriber) *Pub return &PublicAPI{ service: s, eventSub: eventSub, - log: log.New("package", "status-go/services/sshext.PublicAPI"), + logger: logutils.ZapLogger().Named("sshextService"), } } @@ -340,14 +341,14 @@ func (api *PublicAPI) UnmuteChat(parent context.Context, chatID string) error { } func (api *PublicAPI) BlockContact(ctx context.Context, contactID string) (*protocol.MessengerResponse, error) { - api.log.Info("blocking contact", "contact", contactID) + api.logger.Info("blocking contact", zap.String("contact", contactID)) return api.service.messenger.BlockContact(ctx, contactID, false) } // This function is the same as the one above, but used only on the desktop side, since at the end it doesn't set // `Added` flag to `false`, but only `Blocked` to `true` func (api *PublicAPI) BlockContactDesktop(ctx context.Context, contactID string) (*protocol.MessengerResponse, error) { - api.log.Info("blocking contact", "contact", contactID) + api.logger.Info("blocking contact", zap.String("contact", contactID)) return api.service.messenger.BlockContactDesktop(ctx, contactID) } diff --git a/services/ext/mailrequests.go b/services/ext/mailrequests.go index 3c55bd4cbdb..903e5bb949c 100644 --- a/services/ext/mailrequests.go +++ b/services/ext/mailrequests.go @@ -3,10 +3,11 @@ package ext import ( "sync" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" "github.com/status-im/status-go/common" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/ext/mailservers" ) @@ -113,7 +114,7 @@ func (m *MailRequestMonitor) handleEventMailServerRequestCompleted(event types.E if !ok || state != MailServerRequestSent { return } - log.Debug("mailserver response received", "hash", event.Hash) + logutils.ZapLogger().Debug("mailserver response received", zap.Stringer("hash", event.Hash)) delete(m.cache, event.Hash) if m.handler != nil { if resp, ok := event.Data.(*types.MailServerResponse); ok { @@ -130,7 +131,7 @@ func (m *MailRequestMonitor) handleEventMailServerRequestExpired(event types.Env if !ok || state != MailServerRequestSent { return } - log.Debug("mailserver response expired", "hash", event.Hash) + logutils.ZapLogger().Debug("mailserver response expired", zap.Stringer("hash", event.Hash)) delete(m.cache, event.Hash) if m.handler != nil { m.handler.MailServerRequestExpired(event.Hash) diff --git a/services/ext/mailservers/connmanager.go b/services/ext/mailservers/connmanager.go index 25c55229eca..a5e93beaf87 100644 --- a/services/ext/mailservers/connmanager.go +++ b/services/ext/mailservers/connmanager.go @@ -4,13 +4,15 @@ import ( "sync" "time" + 
"go.uber.org/zap" + "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/status-im/status-go/common" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" ) const ( @@ -101,10 +103,10 @@ func (ps *ConnectionManager) Start() { case <-ps.quit: return case err := <-sub.Err(): - log.Error("retry after error subscribing to p2p events", "error", err) + logutils.ZapLogger().Error("retry after error subscribing to p2p events", zap.Error(err)) return case err := <-whisperSub.Err(): - log.Error("retry after error suscribing to eventSub events", "error", err) + logutils.ZapLogger().Error("retry after error suscribing to eventSub events", zap.Error(err)) return case newNodes := <-ps.notifications: state.processReplacement(newNodes, events) @@ -125,7 +127,7 @@ func (ps *ConnectionManager) Start() { continue } failuresPerServer[ev.Peer]++ - log.Debug("request to a mail server expired, disconnect a peer", "address", ev.Peer) + logutils.ZapLogger().Debug("request to a mail server expired, disconnect a peer", zap.Stringer("address", ev.Peer)) if failuresPerServer[ev.Peer] >= ps.maxFailures { state.nodeDisconnected(ev.Peer) } @@ -157,12 +159,13 @@ func (state *internalState) processReplacement(newNodes []*enode.Node, events <- } state.replaceNodes(replacement) if state.ReachedTarget() { - log.Debug("already connected with required target", "target", state.target) + logutils.ZapLogger().Debug("already connected with required target", zap.Int("target", state.target)) return } if state.timeout != 0 { - log.Debug("waiting defined timeout to establish connections", - "timeout", state.timeout, "target", state.target) + logutils.ZapLogger().Debug("waiting defined timeout to establish connections", + zap.Duration("timeout", state.timeout), + zap.Int("target", state.target)) timer := time.NewTimer(state.timeout) waitForConnections(state, timer.C, events) timer.Stop() @@ -250,10 +253,10 @@ func (state *internalState) nodeDisconnected(peer types.EnodeID) { func processPeerEvent(state *internalState, ev *p2p.PeerEvent) { switch ev.Type { case p2p.PeerEventTypeAdd: - log.Debug("connected to a mailserver", "address", ev.Peer) + logutils.ZapLogger().Debug("connected to a mailserver", zap.Stringer("address", ev.Peer)) state.nodeAdded(types.EnodeID(ev.Peer)) case p2p.PeerEventTypeDrop: - log.Debug("mailserver disconnected", "address", ev.Peer) + logutils.ZapLogger().Debug("mailserver disconnected", zap.Stringer("address", ev.Peer)) state.nodeDisconnected(types.EnodeID(ev.Peer)) } } diff --git a/services/ext/mailservers/connmonitor.go b/services/ext/mailservers/connmonitor.go index ca20a5928b7..0f90c08deed 100644 --- a/services/ext/mailservers/connmonitor.go +++ b/services/ext/mailservers/connmonitor.go @@ -4,10 +4,11 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" "github.com/status-im/status-go/common" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" ) // NewLastUsedConnectionMonitor returns pointer to the instance of LastUsedConnectionMonitor. 
@@ -45,7 +46,7 @@ func (mon *LastUsedConnectionMonitor) Start() { case <-mon.quit: return case err := <-sub.Err(): - log.Error("retry after error suscribing to eventSub events", "error", err) + logutils.ZapLogger().Error("retry after error subscribing to eventSub events", zap.Error(err)) return case ev := <-events: node := mon.ps.Get(ev.Peer) @@ -55,7 +56,7 @@ func (mon *LastUsedConnectionMonitor) Start() { if ev.Event == types.EventMailServerRequestCompleted { err := mon.updateRecord(ev.Peer) if err != nil { - log.Error("unable to update storage", "peer", ev.Peer, "error", err) + logutils.ZapLogger().Error("unable to update storage", zap.Stringer("peer", ev.Peer), zap.Error(err)) } } } diff --git a/services/ext/service.go b/services/ext/service.go index f51f94ea713..e7eea0598bf 100644 --- a/services/ext/service.go +++ b/services/ext/service.go @@ -20,7 +20,6 @@ import ( gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" @@ -35,6 +34,7 @@ import ( "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/images" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/params" @@ -294,7 +294,7 @@ func (c *verifyTransactionClient) TransactionByHash(ctx context.Context, hash ty func (s *Service) verifyTransactionLoop(tick time.Duration, cancel <-chan struct{}) { defer gocommon.LogOnPanic() if s.config.ShhextConfig.VerifyTransactionURL == "" { - log.Warn("not starting transaction loop") + logutils.ZapLogger().Warn("not starting transaction loop") return } @@ -308,7 +308,7 @@ func (s *Service) verifyTransactionLoop(tick time.Duration, cancel <-chan struct case <-ticker.C: accounts, err := s.accountsDB.GetActiveAccounts() if err != nil { - log.Error("failed to retrieve accounts", "err", err) + logutils.ZapLogger().Error("failed to retrieve accounts", zap.Error(err)) } var wallets []types.Address for _, account := range accounts { @@ -319,7 +319,7 @@ func (s *Service) verifyTransactionLoop(tick time.Duration, cancel <-chan struct response, err := s.messenger.ValidateTransactions(ctx, wallets) if err != nil { - log.Error("failed to validate transactions", "err", err) + logutils.ZapLogger().Error("failed to validate transactions", zap.Error(err)) continue } s.messenger.PublishMessengerResponse(response) @@ -363,7 +363,7 @@ func (s *Service) Start() error { // Stop is run when a service is stopped.
func (s *Service) Stop() error { - log.Info("Stopping shhext service") + logutils.ZapLogger().Info("Stopping shhext service") if s.cancelMessenger != nil { select { case <-s.cancelMessenger: @@ -376,7 +376,7 @@ func (s *Service) Stop() error { if s.messenger != nil { if err := s.messenger.Shutdown(); err != nil { - log.Error("failed to stop messenger", "err", err) + logutils.ZapLogger().Error("failed to stop messenger", zap.Error(err)) return err } s.messenger = nil diff --git a/services/gif/gif.go b/services/gif/gif.go index bec3d93a763..b6b0862ed2a 100644 --- a/services/gif/gif.go +++ b/services/gif/gif.go @@ -7,7 +7,9 @@ import ( "net/http" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/multiaccounts/settings" ) @@ -41,7 +43,7 @@ type API struct { } func (api *API) SetTenorAPIKey(key string) (err error) { - log.Info("[GifAPI::SetTenorAPIKey]") + logutils.ZapLogger().Info("[GifAPI::SetTenorAPIKey]") err = api.db.SaveSettingField(settings.GifAPIKey, key) if err != nil { return err @@ -67,7 +69,7 @@ func (api *API) GetContentWithRetry(path string) (value string, err error) { response, err = client.Get(baseURL + path + defaultParams + tenorAPIKey) if err != nil { - log.Error("can't get content from path %s with %s", path, err.Error()) + logutils.ZapLogger().Error("can't get content", zap.String("path", path), zap.Error(err)) currentRetry++ time.Sleep(100 * time.Millisecond) } else { @@ -93,12 +95,10 @@ func (api *API) GetContentWithRetry(path string) (value string, err error) { } func (api *API) FetchGifs(path string) (value string, err error) { - log.Info("[GifAPI::fetchGifs]") return api.GetContentWithRetry(path) } func (api *API) UpdateRecentGifs(updatedGifs json.RawMessage) (err error) { - log.Info("[GifAPI::updateRecentGifs]") recentGifsContainer := Container{} err = json.Unmarshal(updatedGifs, &recentGifsContainer) if err != nil { @@ -112,7 +112,6 @@ func (api *API) UpdateRecentGifs(updatedGifs json.RawMessage) (err error) { } func (api *API) UpdateFavoriteGifs(updatedGifs json.RawMessage) (err error) { - log.Info("[GifAPI::updateFavoriteGifs]", updatedGifs) favsGifsContainer := Container{} err = json.Unmarshal(updatedGifs, &favsGifsContainer) if err != nil { @@ -126,7 +125,6 @@ func (api *API) UpdateFavoriteGifs(updatedGifs json.RawMessage) (err error) { } func (api *API) GetRecentGifs() (recentGifs []Gif, err error) { - log.Info("[GifAPI::getRecentGifs]") gifs, err := api.db.GifRecents() if err != nil { return nil, err @@ -144,7 +142,6 @@ func (api *API) GetRecentGifs() (recentGifs []Gif, err error) { } func (api *API) GetFavoriteGifs() (favoriteGifs []Gif, err error) { - log.Info("[GifAPI::getFavoriteGifs]") gifs, err := api.db.GifFavorites() if err != nil { return nil, err diff --git a/services/local-notifications/api.go b/services/local-notifications/api.go index 1ba5ecd6066..632ef499934 100644 --- a/services/local-notifications/api.go +++ b/services/local-notifications/api.go @@ -3,7 +3,7 @@ package localnotifications import ( "context" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) func NewAPI(s *Service) *API { @@ -19,7 +19,7 @@ func (api *API) NotificationPreferences(ctx context.Context) ([]NotificationPref } func (api *API) SwitchWalletNotifications(ctx context.Context, preference bool) error { - log.Debug("Switch Transaction Notification") + logutils.ZapLogger().Debug("Switch 
Transaction Notification") err := api.s.db.ChangeWalletPreference(preference) if err != nil { return err diff --git a/services/local-notifications/core.go b/services/local-notifications/core.go index 8299b150113..71f229e7fed 100644 --- a/services/local-notifications/core.go +++ b/services/local-notifications/core.go @@ -5,12 +5,14 @@ import ( "encoding/json" "sync" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/rpc" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/services/wallet/transfer" "github.com/status-im/status-go/signal" @@ -156,7 +158,7 @@ func PushMessages(ns []*Notification) { } func pushMessage(notification *Notification) { - log.Debug("Pushing a new push notification") + logutils.ZapLogger().Debug("Pushing a new push notification") signal.SendLocalNotifications(notification) } @@ -181,7 +183,7 @@ func (s *Service) Start() error { return case err := <-sub.Err(): if err != nil { - log.Error("Local notifications transmitter failed with", "error", err) + logutils.ZapLogger().Error("Local notifications transmitter failed with", zap.Error(err)) } return case event := <-events: @@ -190,7 +192,7 @@ func (s *Service) Start() error { } }() - log.Info("Successful start") + logutils.ZapLogger().Info("Successful start") return nil } diff --git a/services/local-notifications/transaction.go b/services/local-notifications/transaction.go index 280b9af188c..7b5b49a6b2b 100644 --- a/services/local-notifications/transaction.go +++ b/services/local-notifications/transaction.go @@ -4,13 +4,15 @@ import ( "encoding/json" "math/big" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/services/wallet/transfer" "github.com/status-im/status-go/services/wallet/walletevent" @@ -53,7 +55,7 @@ func (t transactionBody) MarshalJSON() ([]byte, error) { } func (s *Service) buildTransactionNotification(rawTransfer transfer.Transfer) *Notification { - log.Info("Handled a new transfer in buildTransactionNotification", "info", rawTransfer) + logutils.ZapLogger().Debug("Handled a new transfer in buildTransactionNotification", zap.Any("info", rawTransfer)) var deeplink string var state transactionState @@ -71,13 +73,13 @@ func (s *Service) buildTransactionNotification(rawTransfer transfer.Transfer) *N from, err := s.accountsDB.GetAccountByAddress(types.Address(transfer.From)) if err != nil { - log.Debug("Could not select From account by address", "error", err) + logutils.ZapLogger().Debug("Could not select From account by address", zap.Error(err)) } to, err := s.accountsDB.GetAccountByAddress(types.Address(transfer.To)) if err != nil { - log.Debug("Could not select To account by address", "error", err) + logutils.ZapLogger().Debug("Could not select To account by address", zap.Error(err)) } if from != nil { @@ -108,16 +110,16 @@ func (s *Service) buildTransactionNotification(rawTransfer transfer.Transfer) *N } func (s *Service) transactionsHandler(payload 
TransactionEvent) { - log.Info("Handled a new transaction", "info", payload) + logutils.ZapLogger().Info("Handled a new transaction", zap.Any("info", payload)) limit := 20 if payload.BlockNumber != nil { for _, address := range payload.Accounts { if payload.BlockNumber.Cmp(payload.MaxKnownBlocks[address]) >= 0 { - log.Info("Handled transfer for address", "info", address) + logutils.ZapLogger().Info("Handled transfer for address", zap.Stringer("info", address)) transfers, err := s.walletDB.GetTransfersByAddressAndBlock(s.chainID, address, payload.BlockNumber, int64(limit)) if err != nil { - log.Error("Could not fetch transfers", "error", err) + logutils.ZapLogger().Error("Could not fetch transfers", zap.Error(err)) } for _, transaction := range transfers { @@ -136,7 +138,7 @@ func (s *Service) SubscribeWallet(publisher *event.Feed) error { preference, err := s.db.GetWalletPreference() if err != nil { - log.Error("Failed to get wallet preference", "error", err) + logutils.ZapLogger().Error("Failed to get wallet preference", zap.Error(err)) s.WatchingEnabled = false } else { s.WatchingEnabled = preference.Enabled @@ -155,7 +157,7 @@ func (s *Service) StartWalletWatcher() { } if s.walletTransmitter.publisher == nil { - log.Error("wallet publisher was not initialized") + logutils.ZapLogger().Error("wallet publisher was not initialized") return } @@ -179,7 +181,7 @@ func (s *Service) StartWalletWatcher() { // technically event.Feed cannot send an error to subscription.Err channel. // the only time we will get an event is when that channel is closed. if err != nil { - log.Error("wallet signals transmitter failed with", "error", err) + logutils.ZapLogger().Error("wallet signals transmitter failed with", zap.Error(err)) } return case event := <-events: diff --git a/services/rpcfilters/api.go b/services/rpcfilters/api.go index 79b5f82f82c..802b716ba32 100644 --- a/services/rpcfilters/api.go +++ b/services/rpcfilters/api.go @@ -8,14 +8,15 @@ import ( "time" "github.com/pborman/uuid" + "go.uber.org/zap" ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/filters" - "github.com/ethereum/go-ethereum/log" getrpc "github.com/ethereum/go-ethereum/rpc" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) const ( @@ -146,7 +147,7 @@ func (api *PublicAPI) NewBlockFilter() getrpc.ID { select { case hash := <-s: if err := f.add(hash); err != nil { - log.Error("error adding value to filter", "hash", hash, "error", err) + logutils.ZapLogger().Error("error adding value to filter", zap.Stringer("hash", hash), zap.Error(err)) } case <-f.done: return @@ -182,7 +183,7 @@ func (api *PublicAPI) NewPendingTransactionFilter() getrpc.ID { select { case hash := <-s: if err := f.add(hash); err != nil { - log.Error("error adding value to filter", "hash", hash, "error", err) + logutils.ZapLogger().Error("error adding value to filter", zap.Any("hash", hash), zap.Error(err)) } case <-f.done: return diff --git a/services/rpcfilters/latest_block_changed_event.go b/services/rpcfilters/latest_block_changed_event.go index 520662c222f..491d7349b38 100644 --- a/services/rpcfilters/latest_block_changed_event.go +++ b/services/rpcfilters/latest_block_changed_event.go @@ -5,9 +5,11 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + 
"github.com/status-im/status-go/logutils" ) const ( @@ -89,7 +91,7 @@ func (e *latestBlockChangedEvent) Start() error { } latestBlock, err := e.provider.GetLatestBlock() if err != nil { - log.Error("error while receiving latest block", "error", err) + logutils.ZapLogger().Error("error while receiving latest block", zap.Error(err)) continue } diff --git a/services/rpcfilters/latest_logs.go b/services/rpcfilters/latest_logs.go index 92e90f294d0..f52850748ff 100644 --- a/services/rpcfilters/latest_logs.go +++ b/services/rpcfilters/latest_logs.go @@ -5,12 +5,14 @@ import ( "math/big" "time" + "go.uber.org/zap" + ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" getRpc "github.com/ethereum/go-ethereum/rpc" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) // ContextCaller provides CallContext method as ethereums rpc.Client. @@ -25,11 +27,11 @@ func pollLogs(client ContextCaller, chainID uint64, f *logsFilter, timeout, peri defer cancel() logs, err := getLogs(ctx, client, chainID, f.criteria()) if err != nil { - log.Error("Error fetch logs", "criteria", f.crit, "error", err) + logutils.ZapLogger().Error("Error fetch logs", zap.Any("criteria", f.crit), zap.Error(err)) return } if err := f.add(logs); err != nil { - log.Error("Error adding logs", "logs", logs, "error", err) + logutils.ZapLogger().Error("Error adding logs", zap.Any("logs", logs), zap.Error(err)) } } query() @@ -40,7 +42,7 @@ func pollLogs(client ContextCaller, chainID uint64, f *logsFilter, timeout, peri case <-latest.C: query() case <-f.done: - log.Debug("Filter was stopped", "ID", f.id, "crit", f.crit) + logutils.ZapLogger().Debug("Filter was stopped", zap.String("ID", string(f.id)), zap.Any("crit", f.crit)) return } } diff --git a/services/rpcfilters/transaction_sent_to_upstream_event.go b/services/rpcfilters/transaction_sent_to_upstream_event.go index 450f1514049..15d8a72587d 100644 --- a/services/rpcfilters/transaction_sent_to_upstream_event.go +++ b/services/rpcfilters/transaction_sent_to_upstream_event.go @@ -4,9 +4,11 @@ import ( "errors" "sync" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) type PendingTxInfo struct { @@ -71,7 +73,7 @@ func (e *transactionSentToUpstreamEvent) processTransactionSentToUpstream(transa select { case channel <- transactionInfo: default: - log.Error("dropping messages %s for subscriotion %d because the channel is full", transactionInfo, id) + logutils.ZapLogger().Error("dropping messages because the channel is full", zap.Any("transactionInfo", transactionInfo), zap.Int("id", id)) } } } diff --git a/services/shhext/api_nimbus.go b/services/shhext/api_nimbus.go index 4a42346b265..8ff6950eed1 100644 --- a/services/shhext/api_nimbus.go +++ b/services/shhext/api_nimbus.go @@ -8,8 +8,6 @@ import ( "github.com/status-im/status-go/protocol/common" - "github.com/ethereum/go-ethereum/log" - "github.com/status-im/status-go/eth-node/types" enstypes "github.com/status-im/status-go/eth-node/types/ens" "github.com/status-im/status-go/protocol" @@ -25,7 +23,6 @@ import ( type NimbusPublicAPI struct { service *NimbusService publicAPI types.PublicWhisperAPI - log log.Logger } // NewPublicAPI returns instance of the public API. 
@@ -33,219 +30,9 @@ func NewNimbusPublicAPI(s *NimbusService) *NimbusPublicAPI { return &NimbusPublicAPI{ service: s, publicAPI: s.w.PublicWhisperAPI(), - log: log.New("package", "status-go/services/sshext.NimbusPublicAPI"), } } -// func (api *NimbusPublicAPI) getPeer(rawurl string) (*enode.Node, error) { -// if len(rawurl) == 0 { -// return mailservers.GetFirstConnected(api.service.server, api.service.peerStore) -// } -// return enode.ParseV4(rawurl) -// } - -// // RetryConfig specifies configuration for retries with timeout and max amount of retries. -// type RetryConfig struct { -// BaseTimeout time.Duration -// // StepTimeout defines duration increase per each retry. -// StepTimeout time.Duration -// MaxRetries int -// } - -// RequestMessagesSync repeats MessagesRequest using configuration in retry conf. -// func (api *NimbusPublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) (MessagesResponse, error) { -// var resp MessagesResponse - -// shh := api.service.w -// events := make(chan types.EnvelopeEvent, 10) -// var ( -// requestID types.HexBytes -// err error -// retries int -// ) -// for retries <= conf.MaxRetries { -// sub := shh.SubscribeEnvelopeEvents(events) -// r.Timeout = conf.BaseTimeout + conf.StepTimeout*time.Duration(retries) -// timeout := r.Timeout -// // FIXME this weird conversion is required because MessagesRequest expects seconds but defines time.Duration -// r.Timeout = time.Duration(int(r.Timeout.Seconds())) -// requestID, err = api.RequestMessages(context.Background(), r) -// if err != nil { -// sub.Unsubscribe() -// return resp, err -// } -// mailServerResp, err := waitForExpiredOrCompleted(types.BytesToHash(requestID), events, timeout) -// sub.Unsubscribe() -// if err == nil { -// resp.Cursor = hex.EncodeToString(mailServerResp.Cursor) -// resp.Error = mailServerResp.Error -// return resp, nil -// } -// retries++ -// api.log.Error("[RequestMessagesSync] failed", "err", err, "retries", retries) -// } -// return resp, fmt.Errorf("failed to request messages after %d retries", retries) -// } - -// func waitForExpiredOrCompleted(requestID types.Hash, events chan types.EnvelopeEvent, timeout time.Duration) (*types.MailServerResponse, error) { -// expired := fmt.Errorf("request %x expired", requestID) -// after := time.NewTimer(timeout) -// defer after.Stop() -// for { -// var ev types.EnvelopeEvent -// select { -// case ev = <-events: -// case <-after.C: -// return nil, expired -// } -// if ev.Hash != requestID { -// continue -// } -// switch ev.Event { -// case types.EventMailServerRequestCompleted: -// data, ok := ev.Data.(*types.MailServerResponse) -// if ok { -// return data, nil -// } -// return nil, errors.New("invalid event data type") -// case types.EventMailServerRequestExpired: -// return nil, expired -// } -// } -// } - -// // RequestMessages sends a request for historic messages to a MailServer. 
-// func (api *NimbusPublicAPI) RequestMessages(_ context.Context, r MessagesRequest) (types.HexBytes, error) { -// api.log.Info("RequestMessages", "request", r) -// shh := api.service.w -// now := api.service.w.GetCurrentTime() -// r.setDefaults(now) - -// if r.From > r.To { -// return nil, fmt.Errorf("Query range is invalid: from > to (%d > %d)", r.From, r.To) -// } - -// mailServerNode, err := api.getPeer(r.MailServerPeer) -// if err != nil { -// return nil, fmt.Errorf("%v: %v", ErrInvalidMailServerPeer, err) -// } - -// var ( -// symKey []byte -// publicKey *ecdsa.PublicKey -// ) - -// if r.SymKeyID != "" { -// symKey, err = shh.GetSymKey(r.SymKeyID) -// if err != nil { -// return nil, fmt.Errorf("%v: %v", ErrInvalidSymKeyID, err) -// } -// } else { -// publicKey = mailServerNode.Pubkey() -// } - -// payload, err := makeMessagesRequestPayload(r) -// if err != nil { -// return nil, err -// } - -// envelope, err := makeEnvelop( -// payload, -// symKey, -// publicKey, -// api.service.nodeID, -// shh.MinPow(), -// now, -// ) -// if err != nil { -// return nil, err -// } -// hash := envelope.Hash() - -// if !r.Force { -// err = api.service.requestsRegistry.Register(hash, r.Topics) -// if err != nil { -// return nil, err -// } -// } - -// if err := shh.RequestHistoricMessagesWithTimeout(mailServerNode.ID().Bytes(), envelope, r.Timeout*time.Second); err != nil { -// if !r.Force { -// api.service.requestsRegistry.Unregister(hash) -// } -// return nil, err -// } - -// return hash[:], nil -// } - -// // createSyncMailRequest creates SyncMailRequest. It uses a full bloom filter -// // if no topics are given. -// func createSyncMailRequest(r SyncMessagesRequest) (types.SyncMailRequest, error) { -// var bloom []byte -// if len(r.Topics) > 0 { -// bloom = topicsToBloom(r.Topics...) -// } else { -// bloom = types.MakeFullNodeBloom() -// } - -// cursor, err := hex.DecodeString(r.Cursor) -// if err != nil { -// return types.SyncMailRequest{}, err -// } - -// return types.SyncMailRequest{ -// Lower: r.From, -// Upper: r.To, -// Bloom: bloom, -// Limit: r.Limit, -// Cursor: cursor, -// }, nil -// } - -// func createSyncMessagesResponse(r types.SyncEventResponse) SyncMessagesResponse { -// return SyncMessagesResponse{ -// Cursor: hex.EncodeToString(r.Cursor), -// Error: r.Error, -// } -// } - -// // SyncMessages sends a request to a given MailServerPeer to sync historic messages. -// // MailServerPeers needs to be added as a trusted peer first. 
-// func (api *NimbusPublicAPI) SyncMessages(ctx context.Context, r SyncMessagesRequest) (SyncMessagesResponse, error) { -// log.Info("SyncMessages start", "request", r) - -// var response SyncMessagesResponse - -// mailServerEnode, err := enode.ParseV4(r.MailServerPeer) -// if err != nil { -// return response, fmt.Errorf("invalid MailServerPeer: %v", err) -// } -// mailServerID := mailServerEnode.ID().Bytes() - -// request, err := createSyncMailRequest(r) -// if err != nil { -// return response, fmt.Errorf("failed to create a sync mail request: %v", err) -// } - -// for { -// log.Info("Sending a request to sync messages", "request", request) - -// resp, err := api.service.syncMessages(ctx, mailServerID, request) -// if err != nil { -// return response, err -// } - -// log.Info("Syncing messages response", "error", resp.Error, "cursor", fmt.Sprintf("%#x", resp.Cursor)) - -// if resp.Error != "" || len(resp.Cursor) == 0 || !r.FollowCursor { -// return createSyncMessagesResponse(resp), nil -// } - -// request.Cursor = resp.Cursor -// } -// } - // ConfirmMessagesProcessedByID is a method to confirm that messages was consumed by // the client side. // TODO: this is broken now as it requires dedup ID while a message hash should be used. @@ -300,120 +87,11 @@ func (api *NimbusPublicAPI) ConfirmJoiningGroup(ctx context.Context, chatID stri return api.service.messenger.ConfirmJoiningGroup(ctx, chatID) } -// func (api *NimbusPublicAPI) requestMessagesUsingPayload(request db.HistoryRequest, peer, symkeyID string, payload []byte, force bool, timeout time.Duration, topics []types.TopicType) (hash types.Hash, err error) { -// shh := api.service.w -// now := api.service.w.GetCurrentTime() - -// mailServerNode, err := api.getPeer(peer) -// if err != nil { -// return hash, fmt.Errorf("%v: %v", ErrInvalidMailServerPeer, err) -// } - -// var ( -// symKey []byte -// publicKey *ecdsa.PublicKey -// ) - -// if symkeyID != "" { -// symKey, err = shh.GetSymKey(symkeyID) -// if err != nil { -// return hash, fmt.Errorf("%v: %v", ErrInvalidSymKeyID, err) -// } -// } else { -// publicKey = mailServerNode.Pubkey() -// } - -// envelope, err := makeEnvelop( -// payload, -// symKey, -// publicKey, -// api.service.nodeID, -// shh.MinPow(), -// now, -// ) -// if err != nil { -// return hash, err -// } -// hash = envelope.Hash() - -// err = request.Replace(hash) -// if err != nil { -// return hash, err -// } - -// if !force { -// err = api.service.requestsRegistry.Register(hash, topics) -// if err != nil { -// return hash, err -// } -// } - -// if err := shh.RequestHistoricMessagesWithTimeout(mailServerNode.ID().Bytes(), envelope, timeout); err != nil { -// if !force { -// api.service.requestsRegistry.Unregister(hash) -// } -// return hash, err -// } - -// return hash, nil - -// } - -// // InitiateHistoryRequests is a stateful API for initiating history request for each topic. -// // Caller of this method needs to define only two parameters per each TopicRequest: -// // - Topic -// // - Duration in nanoseconds. Will be used to determine starting time for history request. -// // After that status-go will guarantee that request for this topic and date will be performed. 
-// func (api *NimbusPublicAPI) InitiateHistoryRequests(parent context.Context, request InitiateHistoryRequestParams) (rst []types.HexBytes, err error) { -// tx := api.service.storage.NewTx() -// defer func() { -// if err == nil { -// err = tx.Commit() -// } -// }() -// ctx := NewContextFromService(parent, api.service, tx) -// requests, err := api.service.historyUpdates.CreateRequests(ctx, request.Requests) -// if err != nil { -// return nil, err -// } -// var ( -// payload []byte -// hash types.Hash -// ) -// for i := range requests { -// req := requests[i] -// options := CreateTopicOptionsFromRequest(req) -// bloom := options.ToBloomFilterOption() -// payload, err = bloom.ToMessagesRequestPayload() -// if err != nil { -// return rst, err -// } -// hash, err = api.requestMessagesUsingPayload(req, request.Peer, request.SymKeyID, payload, request.Force, request.Timeout, options.Topics()) -// if err != nil { -// return rst, err -// } -// rst = append(rst, hash.Bytes()) -// } -// return rst, err -// } - -// // CompleteRequest client must mark request completed when all envelopes were processed. -// func (api *NimbusPublicAPI) CompleteRequest(parent context.Context, hex string) (err error) { -// tx := api.service.storage.NewTx() -// ctx := NewContextFromService(parent, api.service, tx) -// err = api.service.historyUpdates.UpdateFinishedRequest(ctx, types.HexToHash(hex)) -// if err == nil { -// return tx.Commit() -// } -// return err -// } - func (api *NimbusPublicAPI) LoadFilters(parent context.Context, chats []*transport.Filter) ([]*transport.Filter, error) { return api.service.messenger.LoadFilters(chats) } func (api *NimbusPublicAPI) SaveChat(parent context.Context, chat *protocol.Chat) error { - api.log.Info("saving chat", "chat", chat) return api.service.messenger.SaveChat(chat) } @@ -430,7 +108,6 @@ func (api *NimbusPublicAPI) SaveContact(parent context.Context, contact *protoco } func (api *NimbusPublicAPI) BlockContact(parent context.Context, contact *protocol.Contact) (*protocol.MessengerResponse, error) { - api.log.Info("blocking contact", "contact", contact.ID) return api.service.messenger.BlockContact(contact) } @@ -559,90 +236,3 @@ func (api *NimbusPublicAPI) SendPairInstallation(ctx context.Context) (*protocol func (api *NimbusPublicAPI) SyncDevices(ctx context.Context, name, picture string) error { return api.service.messenger.SyncDevices(ctx, name, picture) } - -// ----- -// HELPER -// ----- - -// makeEnvelop makes an envelop for a historic messages request. -// Symmetric key is used to authenticate to MailServer. -// PK is the current node ID. -// func makeEnvelop( -// payload []byte, -// symKey []byte, -// publicKey *ecdsa.PublicKey, -// nodeID *ecdsa.PrivateKey, -// pow float64, -// now time.Time, -// ) (types.Envelope, error) { -// params := whisper.MessageParams{ -// PoW: pow, -// Payload: payload, -// WorkTime: defaultWorkTime, -// Src: nodeID, -// } -// // Either symKey or public key is required. -// // This condition is verified in `message.Wrap()` method. -// if len(symKey) > 0 { -// params.KeySym = symKey -// } else if publicKey != nil { -// params.Dst = publicKey -// } -// message, err := whisper.NewSentMessage(¶ms) -// if err != nil { -// return nil, err -// } -// envelope, err := message.Wrap(¶ms, now) -// if err != nil { -// return nil, err -// } -// return nimbusbridge.NewNimbusEnvelopeWrapper(envelope), nil -// } - -// // makeMessagesRequestPayload makes a specific payload for MailServer -// // to request historic messages. 
-// func makeMessagesRequestPayload(r MessagesRequest) ([]byte, error) { -// cursor, err := hex.DecodeString(r.Cursor) -// if err != nil { -// return nil, fmt.Errorf("invalid cursor: %v", err) -// } - -// if len(cursor) > 0 && len(cursor) != mailserver.CursorLength { -// return nil, fmt.Errorf("invalid cursor size: expected %d but got %d", mailserver.CursorLength, len(cursor)) -// } - -// payload := mailserver.MessagesRequestPayload{ -// Lower: r.From, -// Upper: r.To, -// Bloom: createBloomFilter(r), -// Limit: r.Limit, -// Cursor: cursor, -// // Client must tell the MailServer if it supports batch responses. -// // This can be removed in the future. -// Batch: true, -// } - -// return rlp.EncodeToBytes(payload) -// } - -// func createBloomFilter(r MessagesRequest) []byte { -// if len(r.Topics) > 0 { -// return topicsToBloom(r.Topics...) -// } - -// return types.TopicToBloom(r.Topic) -// } - -// func topicsToBloom(topics ...types.TopicType) []byte { -// i := new(big.Int) -// for _, topic := range topics { -// bloom := types.TopicToBloom(topic) -// i.Or(i, new(big.Int).SetBytes(bloom[:])) -// } - -// combined := make([]byte, types.BloomFilterSize) -// data := i.Bytes() -// copy(combined[types.BloomFilterSize-len(data):], data[:]) - -// return combined -// } diff --git a/services/stickers/api.go b/services/stickers/api.go index 4d3c2598265..0a2e5c88318 100644 --- a/services/stickers/api.go +++ b/services/stickers/api.go @@ -6,18 +6,19 @@ import ( "time" "github.com/zenthangplus/goccm" + "go.uber.org/zap" "olympos.io/encoding/edn" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/account" gocommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/contracts" "github.com/status-im/status-go/contracts/stickers" "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/ipfs" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/server" @@ -291,7 +292,9 @@ func (api *API) fetchStickerPacks(chainID uint64, resultChan chan<- *StickerPack stickerPack, err := api.fetchPackData(stickerType, packID, true) if err != nil { - log.Warn("Could not retrieve stickerpack data", "packID", packID, "error", err) + logutils.ZapLogger().Warn("Could not retrieve stickerpack data", + zap.Uint64("packID", packID.Uint64()), + zap.Error(err)) errChan <- err return } diff --git a/services/subscriptions/subscriptions.go b/services/subscriptions/subscriptions.go index f407971db2d..ef2bca09b2a 100644 --- a/services/subscriptions/subscriptions.go +++ b/services/subscriptions/subscriptions.go @@ -5,22 +5,24 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) type Subscriptions struct { mu sync.Mutex subs map[SubscriptionID]*Subscription checkPeriod time.Duration - log log.Logger + logger *zap.Logger } func NewSubscriptions(period time.Duration) *Subscriptions { return &Subscriptions{ subs: make(map[SubscriptionID]*Subscription), checkPeriod: period, - log: log.New("package", "status-go/services/subsriptions.Subscriptions"), + logger: logutils.ZapLogger().Named("subscriptionsService"), } } @@ -34,7 +36,7 @@ func (s *Subscriptions) Create(namespace string, filter filter) 
(SubscriptionID, defer gocommon.LogOnPanic() err := newSub.Start(s.checkPeriod) if err != nil { - s.log.Error("error while starting subscription", "err", err) + s.logger.Error("error while starting subscription", zap.Error(err)) } }() diff --git a/services/updates/api.go b/services/updates/api.go index 39c490ad3fe..2e0117c239b 100644 --- a/services/updates/api.go +++ b/services/updates/api.go @@ -11,8 +11,8 @@ import ( "github.com/hashicorp/go-version" "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/ens" "github.com/status-im/status-go/signal" ) @@ -37,13 +37,13 @@ func (api *API) Check(ctx context.Context, chainID uint64, ens string, currentVe current, err := version.NewVersion(currentVersion) if err != nil { - log.Error("invalid current version", "err", err) + logutils.ZapLogger().Error("invalid current version", zap.Error(err)) return } uri, err := api.ensService.API().ResourceURL(ctx, chainID, ens) if err != nil || uri.Host == "" { - log.Error("can't get obtain the updates content hash url", "ens", ens) + logutils.ZapLogger().Error("can't obtain the updates content hash url", zap.String("ens", ens)) signal.SendUpdateAvailable(false, "", "") return } @@ -52,21 +52,21 @@ func (api *API) Check(ctx context.Context, chainID uint64, ens string, currentVe versionURL := url + "VERSION" response, err := api.httpClient.Get(versionURL) if err != nil { - log.Error("can't get content", zap.String("any", versionURL)) + logutils.ZapLogger().Error("can't get content", zap.String("url", versionURL)) signal.SendUpdateAvailable(false, "", "") return } defer response.Body.Close() if response.StatusCode != http.StatusOK { - log.Error(fmt.Sprintf("version verification response status error: %v", response.StatusCode)) + logutils.ZapLogger().Error(fmt.Sprintf("version verification response status error: %v", response.StatusCode)) signal.SendUpdateAvailable(false, "", "") return } data, err := ioutil.ReadAll(response.Body) if err != nil { - log.Error("version verification body err", "err", err) + logutils.ZapLogger().Error("version verification body err", zap.Error(err)) signal.SendUpdateAvailable(false, "", "") return } @@ -74,7 +74,7 @@ func (api *API) Check(ctx context.Context, chainID uint64, ens string, currentVe c := make(map[string]interface{}) err = json.Unmarshal(data, &c) if err != nil { - log.Error("invalid json", "err", err) + logutils.ZapLogger().Error("invalid json", zap.Error(err)) signal.SendUpdateAvailable(false, "", "") return } @@ -84,14 +84,14 @@ func (api *API) Check(ctx context.Context, chainID uint64, ens string, currentVe case string: latestStr = c["version"].(string) default: - log.Error("invalid latest version", "val", c["version"]) + logutils.ZapLogger().Error("invalid latest version", zap.Any("val", c["version"])) signal.SendUpdateAvailable(false, "", "") return } latest, err := version.NewVersion(latestStr) if err != nil { - log.Error("invalid latest version", "err", err) + logutils.ZapLogger().Error("invalid latest version", zap.Error(err)) signal.SendUpdateAvailable(false, "", "") return } diff --git a/services/wakuext/api.go b/services/wakuext/api.go index a021fdddc8b..b65e33a5a8c 100644 --- a/services/wakuext/api.go +++ b/services/wakuext/api.go @@ -1,7 +1,6 @@ package wakuext import ( - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/services/ext" ) @@ -11,7
+10,6 @@ type PublicAPI struct { *ext.PublicAPI service *Service publicAPI types.PublicWakuAPI - log log.Logger } // NewPublicAPI returns instance of the public API. @@ -20,6 +18,5 @@ func NewPublicAPI(s *Service) *PublicAPI { PublicAPI: ext.NewPublicAPI(s.Service, s.w), service: s, publicAPI: s.w.PublicWakuAPI(), - log: log.New("package", "status-go/services/wakuext.PublicAPI"), } } diff --git a/services/wakuv2ext/api.go b/services/wakuv2ext/api.go index e01a9d0d880..b7e8508bd2d 100644 --- a/services/wakuv2ext/api.go +++ b/services/wakuv2ext/api.go @@ -1,7 +1,6 @@ package wakuv2ext import ( - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/eth-node/types" "github.com/status-im/status-go/services/ext" ) @@ -11,7 +10,6 @@ type PublicAPI struct { *ext.PublicAPI service *Service publicAPI types.PublicWakuAPI - log log.Logger } // NewPublicAPI returns instance of the public API. @@ -20,6 +18,5 @@ func NewPublicAPI(s *Service) *PublicAPI { PublicAPI: ext.NewPublicAPI(s.Service, s.w), service: s, publicAPI: s.w.PublicWakuAPI(), - log: log.New("package", "status-go/services/wakuext.PublicAPI"), } } diff --git a/services/wallet/activity/activity.go b/services/wallet/activity/activity.go index 7c40bf2f34f..f5f8e8a551b 100644 --- a/services/wallet/activity/activity.go +++ b/services/wallet/activity/activity.go @@ -16,8 +16,8 @@ import ( eth "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/bigint" "github.com/status-im/status-go/services/wallet/common" "github.com/status-im/status-go/services/wallet/thirdparty" @@ -564,7 +564,7 @@ func getActivityEntries(ctx context.Context, deps FilterDependencies, addresses return at, toAddress } } - log.Warn(fmt.Sprintf("unexpected activity type. Missing from [%s] or to [%s] in addresses?", fromAddress, toAddress)) + logutils.ZapLogger().Warn(fmt.Sprintf("unexpected activity type. 
Missing from [%s] or to [%s] in addresses?", fromAddress, toAddress)) return ReceiveAT, toAddress } @@ -720,7 +720,7 @@ func getTrInAndOutAmounts(activityType Type, trAmount sql.NullString, pTrAmount amount = pTrAmount ok = true } else { - log.Warn(fmt.Sprintf("invalid transaction amount for type %d", activityType)) + logutils.ZapLogger().Warn(fmt.Sprintf("invalid transaction amount for type %d", activityType)) } if ok { @@ -740,10 +740,10 @@ func getTrInAndOutAmounts(activityType Type, trAmount sql.NullString, pTrAmount outAmount = (*hexutil.Big)(big.NewInt(0)) return default: - log.Warn(fmt.Sprintf("unexpected activity type %d", activityType)) + logutils.ZapLogger().Warn(fmt.Sprintf("unexpected activity type %d", activityType)) } } else { - log.Warn(fmt.Sprintf("could not parse amount %s", trAmount.String)) + logutils.ZapLogger().Warn(fmt.Sprintf("could not parse amount %s", trAmount.String)) } inAmount = (*hexutil.Big)(big.NewInt(0)) @@ -764,9 +764,9 @@ func getMtInAndOutAmounts(dbFromAmount sql.NullString, dbToAmount sql.NullString return } } - log.Warn(fmt.Sprintf("could not parse amounts %s %s", fromHexStr, toHexStr)) + logutils.ZapLogger().Warn(fmt.Sprintf("could not parse amounts %s %s", fromHexStr, toHexStr)) } else { - log.Warn("invalid transaction amounts") + logutils.ZapLogger().Warn("invalid transaction amounts") } inAmount = (*hexutil.Big)(big.NewInt(0)) outAmount = (*hexutil.Big)(big.NewInt(0)) @@ -804,7 +804,7 @@ func transferTypeToTokenType(transferType *TransferType) TokenType { case TransferTypeErc1155: return Erc1155 default: - log.Error(fmt.Sprintf("unexpected transfer type %d", transferType)) + logutils.ZapLogger().Error(fmt.Sprintf("unexpected transfer type %d", transferType)) } return Native } diff --git a/services/wallet/activity/service.go b/services/wallet/activity/service.go index e313c31fef5..ac0dbb6cd62 100644 --- a/services/wallet/activity/service.go +++ b/services/wallet/activity/service.go @@ -10,10 +10,12 @@ import ( "sync/atomic" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/collectibles" @@ -248,11 +250,14 @@ func (s *Service) getActivityDetails(ctx context.Context, entries []Entry) ([]*E return nil, nil } - log.Debug("wallet.activity.Service lazyLoadDetails", "entries.len", len(entries), "ids.len", len(ids)) + logutils.ZapLogger().Debug("wallet.activity.Service lazyLoadDetails", + zap.Int("entries.len", len(entries)), + zap.Int("ids.len", len(ids)), + ) colData, err := s.collectibles.FetchAssetsByCollectibleUniqueID(ctx, ids, true) if err != nil { - log.Error("Error fetching collectible details", "error", err) + logutils.ZapLogger().Error("Error fetching collectible details", zap.Error(err)) return nil, err } @@ -397,7 +402,7 @@ func (s *Service) getDeps() FilterDependencies { func sendResponseEvent(eventFeed *event.Feed, requestID *int32, eventType walletevent.EventType, payloadObj interface{}, resErr error) { payload, err := json.Marshal(payloadObj) if err != nil { - log.Error("Error marshaling response: %v; result error: %w", err, resErr) + logutils.ZapLogger().Error("Error marshaling", zap.NamedError("response", err), zap.NamedError("result", resErr)) } else { err = resErr } @@ -406,7 +411,12 @@ func sendResponseEvent(eventFeed *event.Feed, 
requestID *int32, eventType wallet if requestID != nil { requestIDStr = strconv.Itoa(int(*requestID)) } - log.Debug("wallet.api.activity.Service RESPONSE", "requestID", requestIDStr, "eventType", eventType, "error", err, "payload.len", len(payload)) + logutils.ZapLogger().Debug("wallet.api.activity.Service RESPONSE", + zap.String("requestID", requestIDStr), + zap.String("eventType", string(eventType)), + zap.Error(err), + zap.Int("payload.len", len(payload)), + ) event := walletevent.Event{ Type: eventType, @@ -439,7 +449,7 @@ func (s *Service) areAllAddresses(addresses []common.Address) bool { // Compare with addresses in accountsDB walletAddresses, err := s.getWalletAddreses() if err != nil { - log.Error("Error getting wallet addresses", "error", err) + logutils.ZapLogger().Error("Error getting wallet addresses", zap.Error(err)) return false } diff --git a/services/wallet/activity/session.go b/services/wallet/activity/session.go index 5721fbe1445..b2c43d1d4de 100644 --- a/services/wallet/activity/session.go +++ b/services/wallet/activity/session.go @@ -6,10 +6,12 @@ import ( "strconv" "time" + "go.uber.org/zap" + eth "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/common" "github.com/status-im/status-go/services/wallet/transfer" @@ -361,7 +363,7 @@ func (s *Service) detectNew(changeCount int) { allAddresses := s.areAllAddresses(session.addresses) activities, err := getActivityEntries(context.Background(), s.getDeps(), session.addresses, allAddresses, session.chainIDs, session.filter, 0, fetchLen) if err != nil { - log.Error("Error getting activity entries", "error", err) + logutils.ZapLogger().Error("Error getting activity entries", zap.Error(err)) continue } diff --git a/services/wallet/api.go b/services/wallet/api.go index 93402387387..03c822c223e 100644 --- a/services/wallet/api.go +++ b/services/wallet/api.go @@ -10,17 +10,18 @@ import ( "strings" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" gethrpc "github.com/ethereum/go-ethereum/rpc" signercore "github.com/ethereum/go-ethereum/signer/core/apitypes" abi_spec "github.com/status-im/status-go/abi-spec" "github.com/status-im/status-go/account" - statusErrors "github.com/status-im/status-go/errors" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/rpc/network" "github.com/status-im/status-go/services/typeddata" @@ -31,71 +32,24 @@ import ( "github.com/status-im/status-go/services/wallet/history" "github.com/status-im/status-go/services/wallet/onramp" "github.com/status-im/status-go/services/wallet/requests" - "github.com/status-im/status-go/services/wallet/responses" "github.com/status-im/status-go/services/wallet/router" "github.com/status-im/status-go/services/wallet/router/fees" "github.com/status-im/status-go/services/wallet/router/pathprocessor" - "github.com/status-im/status-go/services/wallet/router/sendtype" "github.com/status-im/status-go/services/wallet/thirdparty" "github.com/status-im/status-go/services/wallet/token" 
"github.com/status-im/status-go/services/wallet/transfer" "github.com/status-im/status-go/services/wallet/walletconnect" - "github.com/status-im/status-go/signal" "github.com/status-im/status-go/transactions" ) func NewAPI(s *Service) *API { - rpcClient := s.GetRPCClient() - transactor := s.GetTransactor() - tokenManager := s.GetTokenManager() - ensService := s.GetEnsService() - stickersService := s.GetStickersService() - featureFlags := s.FeatureFlags() - - router := router.NewRouter(rpcClient, transactor, tokenManager, s.GetMarketManager(), s.GetCollectiblesService(), - s.GetCollectiblesManager(), ensService, stickersService) - - transfer := pathprocessor.NewTransferProcessor(rpcClient, transactor) - router.AddPathProcessor(transfer) - - erc721Transfer := pathprocessor.NewERC721Processor(rpcClient, transactor) - router.AddPathProcessor(erc721Transfer) - - erc1155Transfer := pathprocessor.NewERC1155Processor(rpcClient, transactor) - router.AddPathProcessor(erc1155Transfer) - - hop := pathprocessor.NewHopBridgeProcessor(rpcClient, transactor, tokenManager, rpcClient.NetworkManager) - router.AddPathProcessor(hop) - - if featureFlags.EnableCelerBridge { - // TODO: Celar Bridge is out of scope for 2.30, check it thoroughly once we decide to include it again - cbridge := pathprocessor.NewCelerBridgeProcessor(rpcClient, transactor, tokenManager) - router.AddPathProcessor(cbridge) - } - - paraswap := pathprocessor.NewSwapParaswapProcessor(rpcClient, transactor, tokenManager) - router.AddPathProcessor(paraswap) - - ensRegister := pathprocessor.NewENSRegisterProcessor(rpcClient, transactor, ensService) - router.AddPathProcessor(ensRegister) - - ensRelease := pathprocessor.NewENSReleaseProcessor(rpcClient, transactor, ensService) - router.AddPathProcessor(ensRelease) - - ensPublicKey := pathprocessor.NewENSPublicKeyProcessor(rpcClient, transactor, ensService) - router.AddPathProcessor(ensPublicKey) - - buyStickers := pathprocessor.NewStickersBuyProcessor(rpcClient, transactor, stickersService) - router.AddPathProcessor(buyStickers) - - return &API{s, s.reader, router} + return &API{s, s.reader} } // API is class with methods available over RPC. 
type API struct { s *Service reader *Reader - router *router.Router } func (api *API) StartWallet(ctx context.Context) error { @@ -103,7 +57,6 @@ func (api *API) StartWallet(ctx context.Context) error { } func (api *API) StopWallet(ctx context.Context) error { - api.router.Stop() return api.s.Stop() } @@ -191,7 +144,7 @@ func hexBigToBN(hexBig *hexutil.Big) *big.Int { // @deprecated // GetTransfersByAddress returns transfers for a single address func (api *API) GetTransfersByAddress(ctx context.Context, address common.Address, toBlock, limit *hexutil.Big, fetchMore bool) ([]transfer.View, error) { - log.Debug("[WalletAPI:: GetTransfersByAddress] get transfers for an address", "address", address) + logutils.ZapLogger().Debug("[WalletAPI:: GetTransfersByAddress] get transfers for an address", zap.Stringer("address", address)) var intLimit = int64(1) if limit != nil { intLimit = limit.ToInt().Int64() @@ -203,32 +156,38 @@ func (api *API) GetTransfersByAddress(ctx context.Context, address common.Addres // LoadTransferByHash loads transfer to the database // Only used by status-mobile func (api *API) LoadTransferByHash(ctx context.Context, address common.Address, hash common.Hash) error { - log.Debug("[WalletAPI:: LoadTransferByHash] get transfer by hash", "address", address, "hash", hash) + logutils.ZapLogger().Debug("[WalletAPI:: LoadTransferByHash] get transfer by hash", zap.Stringer("address", address), zap.Stringer("hash", hash)) return api.s.transferController.LoadTransferByHash(ctx, api.s.rpcClient, address, hash) } // @deprecated func (api *API) GetTransfersByAddressAndChainID(ctx context.Context, chainID uint64, address common.Address, toBlock, limit *hexutil.Big, fetchMore bool) ([]transfer.View, error) { - log.Debug("[WalletAPI:: GetTransfersByAddressAndChainIDs] get transfers for an address", "address", address) + logutils.ZapLogger().Debug("[WalletAPI:: GetTransfersByAddressAndChainIDs] get transfers for an address", zap.Stringer("address", address)) return api.s.transferController.GetTransfersByAddress(ctx, chainID, address, hexBigToBN(toBlock), limit.ToInt().Int64(), fetchMore) } // @deprecated func (api *API) GetTransfersForIdentities(ctx context.Context, identities []transfer.TransactionIdentity) ([]transfer.View, error) { - log.Debug("wallet.api.GetTransfersForIdentities", "identities.len", len(identities)) + logutils.ZapLogger().Debug("wallet.api.GetTransfersForIdentities", zap.Int("identities.len", len(identities))) return api.s.transferController.GetTransfersForIdentities(ctx, identities) } func (api *API) FetchDecodedTxData(ctx context.Context, data string) (*thirdparty.DataParsed, error) { - log.Debug("[Wallet: FetchDecodedTxData]") + logutils.ZapLogger().Debug("[Wallet: FetchDecodedTxData]") return api.s.decoder.Decode(data) } // GetBalanceHistory retrieves token balance history for token identity on multiple chains func (api *API) GetBalanceHistory(ctx context.Context, chainIDs []uint64, addresses []common.Address, tokenSymbol string, currencySymbol string, timeInterval history.TimeInterval) ([]*history.ValuePoint, error) { - log.Debug("wallet.api.GetBalanceHistory", "chainIDs", chainIDs, "address", addresses, "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "timeInterval", timeInterval) + logutils.ZapLogger().Debug("wallet.api.GetBalanceHistory", + zap.Uint64s("chainIDs", chainIDs), + zap.Stringers("address", addresses), + zap.String("tokenSymbol", tokenSymbol), + zap.String("currencySymbol", currencySymbol), + zap.Int("timeInterval", int(timeInterval)), + 
) var fromTimestamp uint64 now := uint64(time.Now().UTC().Unix()) @@ -253,70 +212,76 @@ func (api *API) GetBalanceHistory(ctx context.Context, chainIDs []uint64, addres // GetBalanceHistoryRange retrieves token balance history for token identity on multiple chains for a time range // 'toTimestamp' is ignored for now, but will be used in the future to limit the range of the history func (api *API) GetBalanceHistoryRange(ctx context.Context, chainIDs []uint64, addresses []common.Address, tokenSymbol string, currencySymbol string, fromTimestamp uint64, _ uint64) ([]*history.ValuePoint, error) { - log.Debug("wallet.api.GetBalanceHistoryRange", "chainIDs", chainIDs, "address", addresses, "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "fromTimestamp", fromTimestamp) + logutils.ZapLogger().Debug("wallet.api.GetBalanceHistoryRange", + zap.Uint64s("chainIDs", chainIDs), + zap.Stringers("address", addresses), + zap.String("tokenSymbol", tokenSymbol), + zap.String("currencySymbol", currencySymbol), + zap.Uint64("fromTimestamp", fromTimestamp), + ) return api.s.history.GetBalanceHistory(ctx, chainIDs, addresses, tokenSymbol, currencySymbol, fromTimestamp) } func (api *API) GetTokenList(ctx context.Context) (*token.ListWrapper, error) { - log.Debug("call to get token list") + logutils.ZapLogger().Debug("call to get token list") rst := api.s.tokenManager.GetList() - log.Debug("result from token list", "len", len(rst.Data)) + logutils.ZapLogger().Debug("result from token list", zap.Int("len", len(rst.Data))) return rst, nil } // @deprecated func (api *API) GetTokens(ctx context.Context, chainID uint64) ([]*token.Token, error) { - log.Debug("call to get tokens") + logutils.ZapLogger().Debug("call to get tokens") rst, err := api.s.tokenManager.GetTokens(chainID) - log.Debug("result from token store", "len", len(rst)) + logutils.ZapLogger().Debug("result from token store", zap.Int("len", len(rst))) return rst, err } // @deprecated func (api *API) GetCustomTokens(ctx context.Context) ([]*token.Token, error) { - log.Debug("call to get custom tokens") + logutils.ZapLogger().Debug("call to get custom tokens") rst, err := api.s.tokenManager.GetCustoms(true) - log.Debug("result from database for custom tokens", "len", len(rst)) + logutils.ZapLogger().Debug("result from database for custom tokens", zap.Int("len", len(rst))) return rst, err } func (api *API) DiscoverToken(ctx context.Context, chainID uint64, address common.Address) (*token.Token, error) { - log.Debug("call to get discover token") + logutils.ZapLogger().Debug("call to get discover token") token, err := api.s.tokenManager.DiscoverToken(ctx, chainID, address) return token, err } func (api *API) AddCustomToken(ctx context.Context, token token.Token) error { - log.Debug("call to create or edit custom token") + logutils.ZapLogger().Debug("call to create or edit custom token") if token.ChainID == 0 { token.ChainID = api.s.rpcClient.UpstreamChainID } err := api.s.tokenManager.UpsertCustom(token) - log.Debug("result from database for create or edit custom token", "err", err) + logutils.ZapLogger().Debug("result from database for create or edit custom token", zap.Error(err)) return err } // @deprecated func (api *API) DeleteCustomToken(ctx context.Context, address common.Address) error { - log.Debug("call to remove custom token") + logutils.ZapLogger().Debug("call to remove custom token") err := api.s.tokenManager.DeleteCustom(api.s.rpcClient.UpstreamChainID, address) - log.Debug("result from database for remove custom token", "err", err) + 
logutils.ZapLogger().Debug("result from database for remove custom token", zap.Error(err)) return err } func (api *API) DeleteCustomTokenByChainID(ctx context.Context, chainID uint64, address common.Address) error { - log.Debug("call to remove custom token") + logutils.ZapLogger().Debug("call to remove custom token") err := api.s.tokenManager.DeleteCustom(chainID, address) - log.Debug("result from database for remove custom token", "err", err) + logutils.ZapLogger().Debug("result from database for remove custom token", zap.Error(err)) return err } // @deprecated // Not used by status-desktop anymore func (api *API) GetPendingTransactions(ctx context.Context) ([]*transactions.PendingTransaction, error) { - log.Debug("wallet.api.GetPendingTransactions") + logutils.ZapLogger().Debug("wallet.api.GetPendingTransactions") rst, err := api.s.pendingTxManager.GetAllPending() - log.Debug("wallet.api.GetPendingTransactions RESULT", "len", len(rst)) + logutils.ZapLogger().Debug("wallet.api.GetPendingTransactions RESULT", zap.Int("len", len(rst))) return rst, err } @@ -325,7 +290,7 @@ func (api *API) GetPendingTransactions(ctx context.Context) ([]*transactions.Pen func (api *API) GetPendingTransactionsForIdentities(ctx context.Context, identities []transfer.TransactionIdentity) ( result []*transactions.PendingTransaction, err error) { - log.Debug("wallet.api.GetPendingTransactionsForIdentities") + logutils.ZapLogger().Debug("wallet.api.GetPendingTransactionsForIdentities") result = make([]*transactions.PendingTransaction, 0, len(identities)) var pt *transactions.PendingTransaction @@ -334,28 +299,32 @@ func (api *API) GetPendingTransactionsForIdentities(ctx context.Context, identit result = append(result, pt) } - log.Debug("wallet.api.GetPendingTransactionsForIdentities RES", "len", len(result)) + logutils.ZapLogger().Debug("wallet.api.GetPendingTransactionsForIdentities RES", zap.Int("len", len(result))) return } // @deprecated // TODO - #11861: Remove this and replace with EventPendingTransactionStatusChanged event and Delete to confirm the transaction where it is needed func (api *API) WatchTransactionByChainID(ctx context.Context, chainID uint64, transactionHash common.Hash) (err error) { - log.Debug("wallet.api.WatchTransactionByChainID", "chainID", chainID, "transactionHash", transactionHash) + logutils.ZapLogger().Debug("wallet.api.WatchTransactionByChainID", zap.Uint64("chainID", chainID), zap.Stringer("transactionHash", transactionHash)) defer func() { - log.Debug("wallet.api.WatchTransactionByChainID return", "err", err, "chainID", chainID, "transactionHash", transactionHash) + logutils.ZapLogger().Debug("wallet.api.WatchTransactionByChainID", + zap.Error(err), + zap.Uint64("chainID", chainID), + zap.Stringer("transactionHash", transactionHash), + ) }() return api.s.transactionManager.WatchTransaction(ctx, chainID, transactionHash) } func (api *API) GetCryptoOnRamps(ctx context.Context) ([]onramp.CryptoOnRamp, error) { - log.Debug("call to GetCryptoOnRamps") + logutils.ZapLogger().Debug("call to GetCryptoOnRamps") return api.s.cryptoOnRampManager.GetProviders(ctx) } func (api *API) GetCryptoOnRampURL(ctx context.Context, providerID string, parameters onramp.Parameters) (string, error) { - log.Debug("call to GetCryptoOnRampURL") + logutils.ZapLogger().Debug("call to GetCryptoOnRampURL") return api.s.cryptoOnRampManager.GetURL(ctx, providerID, parameters) } @@ -364,13 +333,13 @@ func (api *API) GetCryptoOnRampURL(ctx context.Context, providerID string, param */ func (api *API) 
FetchCachedBalancesByOwnerAndContractAddress(ctx context.Context, chainID wcommon.ChainID, ownerAddress common.Address, contractAddresses []common.Address) (thirdparty.TokenBalancesPerContractAddress, error) { - log.Debug("call to FetchCachedBalancesByOwnerAndContractAddress") + logutils.ZapLogger().Debug("call to FetchCachedBalancesByOwnerAndContractAddress") return api.s.collectiblesManager.FetchCachedBalancesByOwnerAndContractAddress(ctx, chainID, ownerAddress, contractAddresses) } func (api *API) FetchBalancesByOwnerAndContractAddress(ctx context.Context, chainID wcommon.ChainID, ownerAddress common.Address, contractAddresses []common.Address) (thirdparty.TokenBalancesPerContractAddress, error) { - log.Debug("call to FetchBalancesByOwnerAndContractAddress") + logutils.ZapLogger().Debug("call to FetchBalancesByOwnerAndContractAddress") return api.s.collectiblesManager.FetchBalancesByOwnerAndContractAddress(ctx, chainID, ownerAddress, contractAddresses) } @@ -380,49 +349,61 @@ func (api *API) GetCollectibleOwnership(id thirdparty.CollectibleUniqueID) ([]th } func (api *API) RefetchOwnedCollectibles() error { - log.Debug("wallet.api.RefetchOwnedCollectibles") + logutils.ZapLogger().Debug("wallet.api.RefetchOwnedCollectibles") api.s.collectibles.RefetchOwnedCollectibles() return nil } func (api *API) GetOwnedCollectiblesAsync(requestID int32, chainIDs []wcommon.ChainID, addresses []common.Address, filter collectibles.Filter, offset int, limit int, dataType collectibles.CollectibleDataType, fetchCriteria collectibles.FetchCriteria) error { - log.Debug("wallet.api.GetOwnedCollectiblesAsync", "requestID", requestID, "chainIDs.count", len(chainIDs), "addr.count", len(addresses), "offset", offset, "limit", limit, "dataType", dataType, "fetchCriteria", fetchCriteria) + logutils.ZapLogger().Debug("wallet.api.GetOwnedCollectiblesAsync", + zap.Int32("requestID", requestID), + zap.Int("chainIDs.count", len(chainIDs)), + zap.Int("addr.count", len(addresses)), + zap.Int("offset", offset), + zap.Int("limit", limit), + zap.Any("dataType", dataType), + zap.Any("fetchCriteria", fetchCriteria), + ) api.s.collectibles.GetOwnedCollectiblesAsync(requestID, chainIDs, addresses, filter, offset, limit, dataType, fetchCriteria) return nil } func (api *API) GetCollectiblesByUniqueIDAsync(requestID int32, uniqueIDs []thirdparty.CollectibleUniqueID, dataType collectibles.CollectibleDataType) error { - log.Debug("wallet.api.GetCollectiblesByUniqueIDAsync", "requestID", requestID, "uniqueIDs.count", len(uniqueIDs), "dataType", dataType) + logutils.ZapLogger().Debug("wallet.api.GetCollectiblesByUniqueIDAsync", + zap.Int32("requestID", requestID), + zap.Int("uniqueIDs.count", len(uniqueIDs)), + zap.Any("dataType", dataType), + ) api.s.collectibles.GetCollectiblesByUniqueIDAsync(requestID, uniqueIDs, dataType) return nil } func (api *API) FetchCollectionSocialsAsync(contractID thirdparty.ContractID) error { - log.Debug("wallet.api.FetchCollectionSocialsAsync", "contractID", contractID) + logutils.ZapLogger().Debug("wallet.api.FetchCollectionSocialsAsync", zap.Any("contractID", contractID)) return api.s.collectiblesManager.FetchCollectionSocialsAsync(contractID) } func (api *API) GetCollectibleOwnersByContractAddress(ctx context.Context, chainID wcommon.ChainID, contractAddress common.Address) (*thirdparty.CollectibleContractOwnership, error) { - log.Debug("call to GetCollectibleOwnersByContractAddress") + logutils.ZapLogger().Debug("call to GetCollectibleOwnersByContractAddress") return 
api.s.collectiblesManager.FetchCollectibleOwnersByContractAddress(ctx, chainID, contractAddress) } func (api *API) FetchCollectibleOwnersByContractAddress(ctx context.Context, chainID wcommon.ChainID, contractAddress common.Address) (*thirdparty.CollectibleContractOwnership, error) { - log.Debug("call to FetchCollectibleOwnersByContractAddress") + logutils.ZapLogger().Debug("call to FetchCollectibleOwnersByContractAddress") return api.s.collectiblesManager.FetchCollectibleOwnersByContractAddress(ctx, chainID, contractAddress) } func (api *API) SearchCollectibles(ctx context.Context, chainID wcommon.ChainID, text string, cursor string, limit int, providerID string) (*thirdparty.FullCollectibleDataContainer, error) { - log.Debug("call to SearchCollectibles") + logutils.ZapLogger().Debug("call to SearchCollectibles") return api.s.collectiblesManager.SearchCollectibles(ctx, chainID, text, cursor, limit, providerID) } func (api *API) SearchCollections(ctx context.Context, chainID wcommon.ChainID, text string, cursor string, limit int, providerID string) (*thirdparty.CollectionDataContainer, error) { - log.Debug("call to SearchCollections") + logutils.ZapLogger().Debug("call to SearchCollections") return api.s.collectiblesManager.SearchCollections(ctx, chainID, text, cursor, limit, providerID) } @@ -431,61 +412,61 @@ func (api *API) SearchCollections(ctx context.Context, chainID wcommon.ChainID, */ func (api *API) AddEthereumChain(ctx context.Context, network params.Network) error { - log.Debug("call to AddEthereumChain") + logutils.ZapLogger().Debug("call to AddEthereumChain") return api.s.rpcClient.NetworkManager.Upsert(&network) } func (api *API) DeleteEthereumChain(ctx context.Context, chainID uint64) error { - log.Debug("call to DeleteEthereumChain") + logutils.ZapLogger().Debug("call to DeleteEthereumChain") return api.s.rpcClient.NetworkManager.Delete(chainID) } func (api *API) GetEthereumChains(ctx context.Context) ([]*network.CombinedNetwork, error) { - log.Debug("call to GetEthereumChains") + logutils.ZapLogger().Debug("call to GetEthereumChains") return api.s.rpcClient.NetworkManager.GetCombinedNetworks() } // @deprecated func (api *API) FetchPrices(ctx context.Context, symbols []string, currencies []string) (map[string]map[string]float64, error) { - log.Debug("call to FetchPrices") + logutils.ZapLogger().Debug("call to FetchPrices") return api.s.marketManager.FetchPrices(symbols, currencies) } // @deprecated func (api *API) FetchMarketValues(ctx context.Context, symbols []string, currency string) (map[string]thirdparty.TokenMarketValues, error) { - log.Debug("call to FetchMarketValues") + logutils.ZapLogger().Debug("call to FetchMarketValues") return api.s.marketManager.FetchTokenMarketValues(symbols, currency) } func (api *API) GetHourlyMarketValues(ctx context.Context, symbol string, currency string, limit int, aggregate int) ([]thirdparty.HistoricalPrice, error) { - log.Debug("call to GetHourlyMarketValues") + logutils.ZapLogger().Debug("call to GetHourlyMarketValues") return api.s.marketManager.FetchHistoricalHourlyPrices(symbol, currency, limit, aggregate) } func (api *API) GetDailyMarketValues(ctx context.Context, symbol string, currency string, limit int, allData bool, aggregate int) ([]thirdparty.HistoricalPrice, error) { - log.Debug("call to GetDailyMarketValues") + logutils.ZapLogger().Debug("call to GetDailyMarketValues") return api.s.marketManager.FetchHistoricalDailyPrices(symbol, currency, limit, allData, aggregate) } // @deprecated func (api *API) FetchTokenDetails(ctx 
context.Context, symbols []string) (map[string]thirdparty.TokenDetails, error) { - log.Debug("call to FetchTokenDetails") + logutils.ZapLogger().Debug("call to FetchTokenDetails") return api.s.marketManager.FetchTokenDetails(symbols) } func (api *API) GetSuggestedFees(ctx context.Context, chainID uint64) (*fees.SuggestedFeesGwei, error) { - log.Debug("call to GetSuggestedFees") - return api.router.GetFeesManager().SuggestedFeesGwei(ctx, chainID) + logutils.ZapLogger().Debug("call to GetSuggestedFees") + return api.s.router.GetFeesManager().SuggestedFeesGwei(ctx, chainID) } func (api *API) GetEstimatedLatestBlockNumber(ctx context.Context, chainID uint64) (uint64, error) { - log.Debug("call to GetEstimatedLatestBlockNumber, chainID:", chainID) + logutils.ZapLogger().Debug("call to GetEstimatedLatestBlockNumber", zap.Uint64("chainID", chainID)) return api.s.blockChainState.GetEstimatedLatestBlockNumber(ctx, chainID) } func (api *API) GetTransactionEstimatedTime(ctx context.Context, chainID uint64, maxFeePerGas *big.Float) (fees.TransactionEstimation, error) { - log.Debug("call to getTransactionEstimatedTime") - return api.router.GetFeesManager().TransactionEstimatedTime(ctx, chainID, gweiToWei(maxFeePerGas)), nil + logutils.ZapLogger().Debug("call to getTransactionEstimatedTime") + return api.s.router.GetFeesManager().TransactionEstimatedTime(ctx, chainID, gweiToWei(maxFeePerGas)), nil } func gweiToWei(val *big.Float) *big.Int { @@ -494,27 +475,27 @@ func gweiToWei(val *big.Float) *big.Int { } func (api *API) GetSuggestedRoutes(ctx context.Context, input *requests.RouteInputParams) (*router.SuggestedRoutes, error) { - log.Debug("call to GetSuggestedRoutes") + logutils.ZapLogger().Debug("call to GetSuggestedRoutes") - return api.router.SuggestedRoutes(ctx, input) + return api.s.router.SuggestedRoutes(ctx, input) } func (api *API) GetSuggestedRoutesAsync(ctx context.Context, input *requests.RouteInputParams) { - log.Debug("call to GetSuggestedRoutesAsync") + logutils.ZapLogger().Debug("call to GetSuggestedRoutesAsync") - api.router.SuggestedRoutesAsync(input) + api.s.router.SuggestedRoutesAsync(input) } func (api *API) StopSuggestedRoutesAsyncCalculation(ctx context.Context) { - log.Debug("call to StopSuggestedRoutesAsyncCalculation") + logutils.ZapLogger().Debug("call to StopSuggestedRoutesAsyncCalculation") - api.router.StopSuggestedRoutesAsyncCalculation() + api.s.router.StopSuggestedRoutesAsyncCalculation() } func (api *API) StopSuggestedRoutesCalculation(ctx context.Context) { - log.Debug("call to StopSuggestedRoutesCalculation") + logutils.ZapLogger().Debug("call to StopSuggestedRoutesCalculation") - api.router.StopSuggestedRoutesCalculation() + api.s.router.StopSuggestedRoutesCalculation() } // Generates addresses for the provided paths, response doesn't include `HasActivity` value (if you need it check `GetAddressDetails` function) @@ -663,7 +644,7 @@ func (api *API) GetAddressDetails(ctx context.Context, chainID uint64, address s } func (api *API) SignMessage(ctx context.Context, message types.HexBytes, address common.Address, password string) (string, error) { - log.Debug("[WalletAPI::SignMessage]", "message", message, "address", address) + logutils.ZapLogger().Debug("[WalletAPI::SignMessage]", zap.Stringer("message", message), zap.Stringer("address", address)) selectedAccount, err := api.s.gethManager.VerifyAccountPassword(api.s.Config().KeyStoreDir, address.Hex(), password) if err != nil { @@ -674,7 +655,7 @@ func (api *API) SignMessage(ctx context.Context, message types.HexBytes, 
address } func (api *API) BuildTransaction(ctx context.Context, chainID uint64, sendTxArgsJSON string) (response *transfer.TxResponse, err error) { - log.Debug("[WalletAPI::BuildTransaction]", "chainID", chainID, "sendTxArgsJSON", sendTxArgsJSON) + logutils.ZapLogger().Debug("[WalletAPI::BuildTransaction]", zap.Uint64("chainID", chainID), zap.String("sendTxArgsJSON", sendTxArgsJSON)) var params transactions.SendTxArgs err = json.Unmarshal([]byte(sendTxArgsJSON), ¶ms) if err != nil { @@ -684,7 +665,7 @@ func (api *API) BuildTransaction(ctx context.Context, chainID uint64, sendTxArgs } func (api *API) BuildRawTransaction(ctx context.Context, chainID uint64, sendTxArgsJSON string, signature string) (response *transfer.TxResponse, err error) { - log.Debug("[WalletAPI::BuildRawTransaction]", "chainID", chainID, "sendTxArgsJSON", sendTxArgsJSON, "signature", signature) + logutils.ZapLogger().Debug("[WalletAPI::BuildRawTransaction]", zap.Uint64("chainID", chainID), zap.String("sendTxArgsJSON", sendTxArgsJSON), zap.String("signature", signature)) sig, err := hex.DecodeString(signature) if err != nil { @@ -702,7 +683,12 @@ func (api *API) BuildRawTransaction(ctx context.Context, chainID uint64, sendTxA func (api *API) SendTransactionWithSignature(ctx context.Context, chainID uint64, txType transactions.PendingTrxType, sendTxArgsJSON string, signature string) (hash types.Hash, err error) { - log.Debug("[WalletAPI::SendTransactionWithSignature]", "chainID", chainID, "txType", txType, "sendTxArgsJSON", sendTxArgsJSON, "signature", signature) + logutils.ZapLogger().Debug("[WalletAPI::SendTransactionWithSignature]", + zap.Uint64("chainID", chainID), + zap.String("txType", string(txType)), + zap.String("sendTxArgsJSON", sendTxArgsJSON), + zap.String("signature", signature), + ) sig, err := hex.DecodeString(signature) if err != nil { return hash, err @@ -726,7 +712,7 @@ func (api *API) SendTransactionWithSignature(ctx context.Context, chainID uint64 // // TODO: remove this struct once mobile switches to the new approach func (api *API) CreateMultiTransaction(ctx context.Context, multiTransactionCommand *transfer.MultiTransactionCommand, data []*pathprocessor.MultipathProcessorTxArgs, password string) (*transfer.MultiTransactionCommandResult, error) { - log.Debug("[WalletAPI:: CreateMultiTransaction] create multi transaction") + logutils.ZapLogger().Debug("[WalletAPI:: CreateMultiTransaction] create multi transaction") cmd, err := api.s.transactionManager.CreateMultiTransactionFromCommand(multiTransactionCommand, data) if err != nil { @@ -739,89 +725,25 @@ func (api *API) CreateMultiTransaction(ctx context.Context, multiTransactionComm return nil, err } - cmdRes, err := api.s.transactionManager.SendTransactions(ctx, cmd, data, api.router.GetPathProcessors(), selectedAccount) + cmdRes, err := api.s.transactionManager.SendTransactions(ctx, cmd, data, api.s.router.GetPathProcessors(), selectedAccount) if err != nil { return nil, err } _, err = api.s.transactionManager.InsertMultiTransaction(cmd) if err != nil { - log.Error("Failed to save multi transaction", "error", err) // not critical + logutils.ZapLogger().Error("Failed to save multi transaction", zap.Error(err)) // not critical } return cmdRes, nil } - return nil, api.s.transactionManager.SendTransactionForSigningToKeycard(ctx, cmd, data, api.router.GetPathProcessors()) -} - -func updateFields(sd *responses.SendDetails, inputParams requests.RouteInputParams) { - sd.SendType = int(inputParams.SendType) - sd.FromAddress = 
types.Address(inputParams.AddrFrom) - sd.ToAddress = types.Address(inputParams.AddrTo) - sd.FromToken = inputParams.TokenID - sd.ToToken = inputParams.ToTokenID - if inputParams.AmountIn != nil { - sd.FromAmount = inputParams.AmountIn.String() - } - if inputParams.AmountOut != nil { - sd.ToAmount = inputParams.AmountOut.String() - } - sd.OwnerTokenBeingSent = inputParams.TokenIDIsOwnerToken - sd.Username = inputParams.Username - sd.PublicKey = inputParams.PublicKey - if inputParams.PackID != nil { - sd.PackID = inputParams.PackID.String() - } + return nil, api.s.transactionManager.SendTransactionForSigningToKeycard(ctx, cmd, data, api.s.router.GetPathProcessors()) } func (api *API) BuildTransactionsFromRoute(ctx context.Context, buildInputParams *requests.RouterBuildTransactionsParams) { - log.Debug("[WalletAPI::BuildTransactionsFromRoute] builds transactions from the generated best route", "uuid", buildInputParams.Uuid) - - go func() { - api.router.StopSuggestedRoutesAsyncCalculation() - - var err error - response := &responses.RouterTransactionsForSigning{ - SendDetails: &responses.SendDetails{ - Uuid: buildInputParams.Uuid, - }, - } - - defer func() { - if err != nil { - api.s.transactionManager.ClearLocalRouterTransactionsData() - err = statusErrors.CreateErrorResponseFromError(err) - response.SendDetails.ErrorResponse = err.(*statusErrors.ErrorResponse) - } - signal.SendWalletEvent(signal.SignRouterTransactions, response) - }() - - route, routeInputParams := api.router.GetBestRouteAndAssociatedInputParams() - if routeInputParams.Uuid != buildInputParams.Uuid { - // should never be here - err = ErrCannotResolveRouteId - return - } - - updateFields(response.SendDetails, routeInputParams) - - // notify client that sending transactions started (has 3 steps, building txs, signing txs, sending txs) - signal.SendWalletEvent(signal.RouterSendingTransactionsStarted, response.SendDetails) - - response.SigningDetails, err = api.s.transactionManager.BuildTransactionsFromRoute( - route, - api.router.GetPathProcessors(), - transfer.BuildRouteExtraParams{ - AddressFrom: routeInputParams.AddrFrom, - AddressTo: routeInputParams.AddrTo, - Username: routeInputParams.Username, - PublicKey: routeInputParams.PublicKey, - PackID: routeInputParams.PackID.ToInt(), - SlippagePercentage: buildInputParams.SlippagePercentage, - }, - ) - }() + logutils.ZapLogger().Debug("[WalletAPI::BuildTransactionsFromRoute] builds transactions from the generated best route", zap.String("uuid", buildInputParams.Uuid)) + api.s.routeExecutionManager.BuildTransactionsFromRoute(ctx, buildInputParams) } // Deprecated: `ProceedWithTransactionsSignatures` is the endpoint used in the old way of sending transactions and should not be used anymore. 
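Address, hash, and chain-ID fields throughout these hunks go through zap.Stringer rather than being pre-rendered with Hex() or fmt.Sprintf. A short sketch of why that is the cheaper choice; the only assumption is a value implementing fmt.Stringer, which go-ethereum's common.Address does:

package main

import (
	"go.uber.org/zap"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	logger := zap.NewExample()
	addr := common.HexToAddress("0x0000000000000000000000000000000000000001")

	// zap.Stringer captures the value itself; String() only runs when the
	// entry is actually encoded, so a disabled debug level skips the
	// formatting cost entirely.
	logger.Debug("watching address", zap.Stringer("address", addr))
}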
@@ -834,127 +756,39 @@ func (api *API) BuildTransactionsFromRoute(ctx context.Context, buildInputParams // // TODO: remove this struct once mobile switches to the new approach func (api *API) ProceedWithTransactionsSignatures(ctx context.Context, signatures map[string]transfer.SignatureDetails) (*transfer.MultiTransactionCommandResult, error) { - log.Debug("[WalletAPI:: ProceedWithTransactionsSignatures] sign with signatures and send multi transaction") + logutils.ZapLogger().Debug("[WalletAPI:: ProceedWithTransactionsSignatures] sign with signatures and send multi transaction") return api.s.transactionManager.ProceedWithTransactionsSignatures(ctx, signatures) } func (api *API) SendRouterTransactionsWithSignatures(ctx context.Context, sendInputParams *requests.RouterSendTransactionsParams) { - log.Debug("[WalletAPI:: SendRouterTransactionsWithSignatures] sign with signatures and send") - go func() { - - var ( - err error - routeInputParams requests.RouteInputParams - ) - response := &responses.RouterSentTransactions{ - SendDetails: &responses.SendDetails{ - Uuid: sendInputParams.Uuid, - }, - } - - defer func() { - clearLocalData := true - if routeInputParams.SendType == sendtype.Swap { - // in case of swap don't clear local data if an approval is placed, but swap tx is not sent yet - if api.s.transactionManager.ApprovalRequiredForPath(pathprocessor.ProcessorSwapParaswapName) && - api.s.transactionManager.ApprovalPlacedForPath(pathprocessor.ProcessorSwapParaswapName) && - !api.s.transactionManager.TxPlacedForPath(pathprocessor.ProcessorSwapParaswapName) { - clearLocalData = false - } - } - - if clearLocalData { - api.s.transactionManager.ClearLocalRouterTransactionsData() - } - - if err != nil { - err = statusErrors.CreateErrorResponseFromError(err) - response.SendDetails.ErrorResponse = err.(*statusErrors.ErrorResponse) - } - signal.SendWalletEvent(signal.RouterTransactionsSent, response) - }() - - _, routeInputParams = api.router.GetBestRouteAndAssociatedInputParams() - if routeInputParams.Uuid != sendInputParams.Uuid { - err = ErrCannotResolveRouteId - return - } - - updateFields(response.SendDetails, routeInputParams) - - err = api.s.transactionManager.ValidateAndAddSignaturesToRouterTransactions(sendInputParams.Signatures) - if err != nil { - return - } - - ////////////////////////////////////////////////////////////////////////////// - // prepare multitx - var mtType transfer.MultiTransactionType = transfer.MultiTransactionSend - if routeInputParams.SendType == sendtype.Bridge { - mtType = transfer.MultiTransactionBridge - } else if routeInputParams.SendType == sendtype.Swap { - mtType = transfer.MultiTransactionSwap - } - - multiTx := transfer.NewMultiTransaction( - /* Timestamp: */ uint64(time.Now().Unix()), - /* FromNetworkID: */ 0, - /* ToNetworkID: */ 0, - /* FromTxHash: */ common.Hash{}, - /* ToTxHash: */ common.Hash{}, - /* FromAddress: */ routeInputParams.AddrFrom, - /* ToAddress: */ routeInputParams.AddrTo, - /* FromAsset: */ routeInputParams.TokenID, - /* ToAsset: */ routeInputParams.ToTokenID, - /* FromAmount: */ routeInputParams.AmountIn, - /* ToAmount: */ routeInputParams.AmountOut, - /* Type: */ mtType, - /* CrossTxID: */ "", - ) - - _, err = api.s.transactionManager.InsertMultiTransaction(multiTx) - if err != nil { - return - } - ////////////////////////////////////////////////////////////////////////////// - - response.SentTransactions, err = api.s.transactionManager.SendRouterTransactions(ctx, multiTx) - var ( - chainIDs []uint64 - addresses []common.Address - ) - for 
_, tx := range response.SentTransactions { - chainIDs = append(chainIDs, tx.FromChain) - addresses = append(addresses, common.Address(tx.FromAddress)) - go func(chainId uint64, txHash common.Hash) { - err = api.s.transactionManager.WatchTransaction(context.Background(), chainId, txHash) - if err != nil { - return - } - }(tx.FromChain, common.Hash(tx.Hash)) - } - err = api.s.transferController.CheckRecentHistory(chainIDs, addresses) - }() + logutils.ZapLogger().Debug("[WalletAPI:: SendRouterTransactionsWithSignatures] sign with signatures and send") + api.s.routeExecutionManager.SendRouterTransactionsWithSignatures(ctx, sendInputParams) } func (api *API) GetMultiTransactions(ctx context.Context, transactionIDs []wcommon.MultiTransactionIDType) ([]*transfer.MultiTransaction, error) { - log.Debug("wallet.api.GetMultiTransactions", "IDs.len", len(transactionIDs)) + logutils.ZapLogger().Debug("wallet.api.GetMultiTransactions", zap.Int("IDs.len", len(transactionIDs))) return api.s.transactionManager.GetMultiTransactions(ctx, transactionIDs) } func (api *API) GetCachedCurrencyFormats() (currency.FormatPerSymbol, error) { - log.Debug("call to GetCachedCurrencyFormats") + logutils.ZapLogger().Debug("call to GetCachedCurrencyFormats") return api.s.currency.GetCachedCurrencyFormats() } func (api *API) FetchAllCurrencyFormats() (currency.FormatPerSymbol, error) { - log.Debug("call to FetchAllCurrencyFormats") + logutils.ZapLogger().Debug("call to FetchAllCurrencyFormats") return api.s.currency.FetchAllCurrencyFormats() } // @deprecated replaced by session APIs; see #12120 func (api *API) FilterActivityAsync(requestID int32, addresses []common.Address, chainIDs []wcommon.ChainID, filter activity.Filter, offset int, limit int) error { - log.Debug("wallet.api.FilterActivityAsync", "requestID", requestID, "addr.count", len(addresses), "chainIDs.count", len(chainIDs), "offset", offset, "limit", limit) + logutils.ZapLogger().Debug("wallet.api.FilterActivityAsync", + zap.Int32("requestID", requestID), + zap.Int("addr.count", len(addresses)), + zap.Int("chainIDs.count", len(chainIDs)), + zap.Int("offset", offset), + zap.Int("limit", limit), + ) api.s.activity.FilterActivityAsync(requestID, addresses, chainIDs, filter, offset, limit) return nil @@ -962,70 +796,93 @@ func (api *API) FilterActivityAsync(requestID int32, addresses []common.Address, // @deprecated replaced by session APIs; see #12120 func (api *API) CancelActivityFilterTask(requestID int32) error { - log.Debug("wallet.api.CancelActivityFilterTask", "requestID", requestID) + logutils.ZapLogger().Debug("wallet.api.CancelActivityFilterTask", zap.Int32("requestID", requestID)) api.s.activity.CancelFilterTask(requestID) return nil } func (api *API) StartActivityFilterSession(addresses []common.Address, chainIDs []wcommon.ChainID, filter activity.Filter, firstPageCount int) (activity.SessionID, error) { - log.Debug("wallet.api.StartActivityFilterSession", "addr.count", len(addresses), "chainIDs.count", len(chainIDs), "firstPageCount", firstPageCount) + logutils.ZapLogger().Debug("wallet.api.StartActivityFilterSession", + zap.Int("addr.count", len(addresses)), + zap.Int("chainIDs.count", len(chainIDs)), + zap.Int("firstPageCount", firstPageCount), + ) return api.s.activity.StartFilterSession(addresses, chainIDs, filter, firstPageCount), nil } func (api *API) UpdateActivityFilterForSession(sessionID activity.SessionID, filter activity.Filter, firstPageCount int) error { - log.Debug("wallet.api.UpdateActivityFilterForSession", "sessionID", sessionID, 
"firstPageCount", firstPageCount) + logutils.ZapLogger().Debug("wallet.api.UpdateActivityFilterForSession", + zap.Int32("sessionID", int32(sessionID)), + zap.Int("firstPageCount", firstPageCount), + ) return api.s.activity.UpdateFilterForSession(sessionID, filter, firstPageCount) } func (api *API) ResetActivityFilterSession(id activity.SessionID, firstPageCount int) error { - log.Debug("wallet.api.ResetActivityFilterSession", "id", id, "firstPageCount", firstPageCount) + logutils.ZapLogger().Debug("wallet.api.ResetActivityFilterSession", + zap.Int32("id", int32(id)), + zap.Int("firstPageCount", firstPageCount), + ) return api.s.activity.ResetFilterSession(id, firstPageCount) } func (api *API) GetMoreForActivityFilterSession(id activity.SessionID, pageCount int) error { - log.Debug("wallet.api.GetMoreForActivityFilterSession", "id", id, "pageCount", pageCount) + logutils.ZapLogger().Debug("wallet.api.GetMoreForActivityFilterSession", + zap.Int32("id", int32(id)), + zap.Int("pageCount", pageCount), + ) return api.s.activity.GetMoreForFilterSession(id, pageCount) } func (api *API) StopActivityFilterSession(id activity.SessionID) { - log.Debug("wallet.api.StopActivityFilterSession", "id", id) + logutils.ZapLogger().Debug("wallet.api.StopActivityFilterSession", zap.Int32("id", int32(id))) api.s.activity.StopFilterSession(id) } func (api *API) GetMultiTxDetails(ctx context.Context, multiTxID int) (*activity.EntryDetails, error) { - log.Debug("wallet.api.GetMultiTxDetails", "multiTxID", multiTxID) + logutils.ZapLogger().Debug("wallet.api.GetMultiTxDetails", zap.Int("multiTxID", multiTxID)) return api.s.activity.GetMultiTxDetails(ctx, multiTxID) } func (api *API) GetTxDetails(ctx context.Context, id string) (*activity.EntryDetails, error) { - log.Debug("wallet.api.GetTxDetails", "id", id) + logutils.ZapLogger().Debug("wallet.api.GetTxDetails", zap.String("id", id)) return api.s.activity.GetTxDetails(ctx, id) } func (api *API) GetRecipientsAsync(requestID int32, chainIDs []wcommon.ChainID, addresses []common.Address, offset int, limit int) (ignored bool, err error) { - log.Debug("wallet.api.GetRecipientsAsync", "addresses.len", len(addresses), "chainIDs.len", len(chainIDs), "offset", offset, "limit", limit) + logutils.ZapLogger().Debug("wallet.api.GetRecipientsAsync", + zap.Int("addresses.len", len(addresses)), + zap.Int("chainIDs.len", len(chainIDs)), + zap.Int("offset", offset), + zap.Int("limit", limit), + ) ignored = api.s.activity.GetRecipientsAsync(requestID, chainIDs, addresses, offset, limit) return ignored, err } func (api *API) GetOldestActivityTimestampAsync(requestID int32, addresses []common.Address) error { - log.Debug("wallet.api.GetOldestActivityTimestamp", "addresses.len", len(addresses)) + logutils.ZapLogger().Debug("wallet.api.GetOldestActivityTimestamp", zap.Int("addresses.len", len(addresses))) api.s.activity.GetOldestTimestampAsync(requestID, addresses) return nil } func (api *API) GetActivityCollectiblesAsync(requestID int32, chainIDs []wcommon.ChainID, addresses []common.Address, offset int, limit int) error { - log.Debug("wallet.api.GetActivityCollectiblesAsync", "addresses.len", len(addresses), "chainIDs.len", len(chainIDs), "offset", offset, "limit", limit) + logutils.ZapLogger().Debug("wallet.api.GetActivityCollectiblesAsync", + zap.Int("addresses.len", len(addresses)), + zap.Int("chainIDs.len", len(chainIDs)), + zap.Int("offset", offset), + zap.Int("limit", limit), + ) api.s.activity.GetActivityCollectiblesAsync(requestID, chainIDs, addresses, offset, limit) @@ -1033,7 
+890,7 @@ func (api *API) GetActivityCollectiblesAsync(requestID int32, chainIDs []wcommon } func (api *API) FetchChainIDForURL(ctx context.Context, rpcURL string) (*big.Int, error) { - log.Debug("wallet.api.VerifyURL", "rpcURL", rpcURL) + logutils.ZapLogger().Debug("wallet.api.FetchChainIDForURL", zap.String("rpcURL", rpcURL)) rpcClient, err := gethrpc.Dial(rpcURL) if err != nil { @@ -1046,19 +903,19 @@ func (api *API) FetchChainIDForURL(ctx context.Context, rpcURL string) (*big.Int func (api *API) getVerifiedWalletAccount(address, password string) (*account.SelectedExtKey, error) { exists, err := api.s.accountsDB.AddressExists(types.HexToAddress(address)) if err != nil { - log.Error("failed to query db for a given address", "address", address, "error", err) + logutils.ZapLogger().Error("failed to query db for a given address", zap.String("address", address), zap.Error(err)) return nil, err } if !exists { - log.Error("failed to get a selected account", "err", transactions.ErrInvalidTxSender) + logutils.ZapLogger().Error("failed to get a selected account", zap.Error(transactions.ErrInvalidTxSender)) return nil, transactions.ErrAccountDoesntExist } keyStoreDir := api.s.Config().KeyStoreDir key, err := api.s.gethManager.VerifyAccountPassword(keyStoreDir, address, password) if err != nil { - log.Error("failed to verify account", "account", address, "error", err) + logutils.ZapLogger().Error("failed to verify account", zap.String("account", address), zap.Error(err)) return nil, err } @@ -1070,33 +927,36 @@ func (api *API) getVerifiedWalletAccount(address, password string) (*account.Sel // AddWalletConnectSession adds or updates a session wallet connect session func (api *API) AddWalletConnectSession(ctx context.Context, session_json string) error { - log.Debug("wallet.api.AddWalletConnectSession", "rpcURL", len(session_json)) + logutils.ZapLogger().Debug("wallet.api.AddWalletConnectSession", zap.Int("len(session_json)", len(session_json))) return walletconnect.AddSession(api.s.db, api.s.config.Networks, session_json) } // DisconnectWalletConnectSession removes a wallet connect session func (api *API) DisconnectWalletConnectSession(ctx context.Context, topic walletconnect.Topic) error { - log.Debug("wallet.api.DisconnectWalletConnectSession", "topic", topic) + logutils.ZapLogger().Debug("wallet.api.DisconnectWalletConnectSession", zap.String("topic", string(topic))) return walletconnect.DisconnectSession(api.s.db, topic) } // GetWalletConnectActiveSessions returns all active wallet connect sessions func (api *API) GetWalletConnectActiveSessions(ctx context.Context, validAtTimestamp int64) ([]walletconnect.DBSession, error) { - log.Debug("wallet.api.GetWalletConnectActiveSessions") + logutils.ZapLogger().Debug("wallet.api.GetWalletConnectActiveSessions") return walletconnect.GetActiveSessions(api.s.db, validAtTimestamp) } // GetWalletConnectDapps returns all active wallet connect dapps // Active dApp are those having active sessions (not expired and not disconnected) func (api *API) GetWalletConnectDapps(ctx context.Context, validAtTimestamp int64, testChains bool) ([]walletconnect.DBDApp, error) { - log.Debug("wallet.api.GetWalletConnectDapps", "validAtTimestamp", validAtTimestamp, "testChains", testChains) + logutils.ZapLogger().Debug("wallet.api.GetWalletConnectDapps", + zap.Int64("validAtTimestamp", validAtTimestamp), + zap.Bool("testChains", testChains), + ) return walletconnect.GetActiveDapps(api.s.db, validAtTimestamp, testChains) } // HashMessageEIP191 is used for hashing dApps requests for "personal_sign"
and "eth_sign" // in a safe manner following the EIP-191 version 0x45 for signing on the client side. func (api *API) HashMessageEIP191(ctx context.Context, message types.HexBytes) types.Hash { - log.Debug("wallet.api.HashMessageEIP191", "len(data)", len(message)) + logutils.ZapLogger().Debug("wallet.api.HashMessageEIP191", zap.Int("len(data)", len(message))) safeMsg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(message), string(message)) return crypto.Keccak256Hash([]byte(safeMsg)) } @@ -1105,7 +965,11 @@ func (api *API) HashMessageEIP191(ctx context.Context, message types.HexBytes) t // the formatted typed data will be prefixed with \x19\x01 based on the EIP-712 // @deprecated func (api *API) SignTypedDataV4(typedJson string, address string, password string) (types.HexBytes, error) { - log.Debug("wallet.api.SignTypedDataV4", "len(typedJson)", len(typedJson), "address", address, "len(password)", len(password)) + logutils.ZapLogger().Debug("wallet.api.SignTypedDataV4", + zap.Int("len(typedJson)", len(typedJson)), + zap.String("address", address), + zap.Int("len(password)", len(password)), + ) account, err := api.getVerifiedWalletAccount(address, password) if err != nil { @@ -1132,7 +996,13 @@ func (api *API) SignTypedDataV4(typedJson string, address string, password strin // old dApps implementation expects // the chain is validate for both cases func (api *API) SafeSignTypedDataForDApps(typedJson string, address string, password string, chainID uint64, legacy bool) (types.HexBytes, error) { - log.Debug("wallet.api.SafeSignTypedDataForDApps", "len(typedJson)", len(typedJson), "address", address, "len(password)", len(password), "chainID", chainID, "legacy", legacy) + logutils.ZapLogger().Debug("wallet.api.SafeSignTypedDataForDApps", + zap.Int("len(typedJson)", len(typedJson)), + zap.String("address", address), + zap.Int("len(password)", len(password)), + zap.Uint64("chainID", chainID), + zap.Bool("legacy", legacy), + ) account, err := api.getVerifiedWalletAccount(address, password) if err != nil { @@ -1147,6 +1017,6 @@ func (api *API) RestartWalletReloadTimer(ctx context.Context) error { } func (api *API) IsChecksumValidForAddress(address string) (bool, error) { - log.Debug("wallet.api.isChecksumValidForAddress", "address", address) + logutils.ZapLogger().Debug("wallet.api.isChecksumValidForAddress", zap.String("address", address)) return abi_spec.CheckAddressChecksum(address) } diff --git a/services/wallet/async/async.go b/services/wallet/async/async.go index 839a3833c3c..a42fe20dde2 100644 --- a/services/wallet/async/async.go +++ b/services/wallet/async/async.go @@ -5,8 +5,10 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) type Command func(context.Context) error @@ -173,7 +175,11 @@ func (d *AtomicGroup) Add(cmd Command) { if err != nil { // do not overwrite original error by context errors if d.error != nil { - log.Info("async.Command failed", "error", err, "d.error", d.error, "group", d.Name()) + logutils.ZapLogger().Info("async.Command failed", + zap.String("group", d.Name()), + zap.NamedError("error", err), + zap.NamedError("d.error", d.error), + ) return } d.error = err @@ -284,7 +290,11 @@ type ErrorCounter struct { // Returns false in case of counter overflow func (ec *ErrorCounter) SetError(err error) bool { - log.Debug("ErrorCounter setError", "msg", ec.msg, "err", err, "cnt", ec.cnt) + logutils.ZapLogger().Debug("ErrorCounter setError", + 
zap.String("msg", ec.msg), + zap.Error(err), + zap.Int("cnt", ec.cnt), + ) ec.cnt++ @@ -294,7 +304,7 @@ func (ec *ErrorCounter) SetError(err error) bool { } if ec.cnt >= ec.maxErrors { - log.Error("ErrorCounter overflow", "msg", ec.msg) + logutils.ZapLogger().Error("ErrorCounter overflow", zap.String("msg", ec.msg)) return false } diff --git a/services/wallet/balance/ttl_cache.go b/services/wallet/balance/ttl_cache.go index a438658bb2d..de03db1e317 100644 --- a/services/wallet/balance/ttl_cache.go +++ b/services/wallet/balance/ttl_cache.go @@ -6,8 +6,10 @@ import ( "time" "github.com/jellydator/ttlcache/v3" + "go.uber.org/zap" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" ) var ( @@ -54,9 +56,16 @@ func (c *ttlCache[K, V]) init() { ttlcache.WithTTL[K, V](defaultTTLValue), ) c.cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[K, V]) { - log.Debug("Evicting item from balance/nonce cache", "reason", reason, "key", item.Key, "value", item.Value) + logutils.ZapLogger().Debug("Evicting item from balance/nonce cache", + zap.Int("reason", int(reason)), + zap.Any("key", item.Key), + zap.Any("value", item.Value), + ) }) - go c.cache.Start() // starts automatic expired item deletion + go func() { // starts automatic expired item deletion + defer common.LogOnPanic() + c.cache.Start() + }() } //nolint:golint,unused // linter does not detect using it via reflect diff --git a/services/wallet/collectibles/commands.go b/services/wallet/collectibles/commands.go index 3375c8cef60..4dfc9a7a1de 100644 --- a/services/wallet/collectibles/commands.go +++ b/services/wallet/collectibles/commands.go @@ -8,9 +8,11 @@ import ( "sync/atomic" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/bigint" walletCommon "github.com/status-im/status-go/services/wallet/common" @@ -252,7 +254,10 @@ func (c *loadOwnedCollectiblesCommand) sendOwnedCollectiblesChanges(removed, upd } func (c *loadOwnedCollectiblesCommand) Run(parent context.Context) (err error) { - log.Debug("start loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account) + logutils.ZapLogger().Debug("start loadOwnedCollectiblesCommand", + zap.Uint64("chain", uint64(c.chainID)), + zap.Stringer("account", c.account), + ) pageNr := 0 cursor := thirdparty.FetchFromStartCursor @@ -276,17 +281,32 @@ func (c *loadOwnedCollectiblesCommand) Run(parent context.Context) (err error) { } pageStart := time.Now() - log.Debug("start loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account, "page", pageNr) + logutils.ZapLogger().Debug("start loadOwnedCollectiblesCommand", + zap.Uint64("chain", uint64(c.chainID)), + zap.Stringer("account", c.account), + zap.Int("page", pageNr), + ) partialOwnership, err := c.manager.FetchCollectibleOwnershipByOwner(parent, c.chainID, c.account, cursor, fetchLimit, providerID) if err != nil { - log.Error("failed loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account, "page", pageNr, "error", err) + logutils.ZapLogger().Error("failed loadOwnedCollectiblesCommand", + zap.Uint64("chain", uint64(c.chainID)), + zap.Stringer("account", c.account), + zap.Int("page", pageNr), + zap.Error(err), + ) c.err = err break } - log.Debug("partial 
loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account, "page", pageNr, "in", time.Since(pageStart), "found", len(partialOwnership.Items)) + logutils.ZapLogger().Debug("partial loadOwnedCollectiblesCommand", + zap.Uint64("chain", uint64(c.chainID)), + zap.Stringer("account", c.account), + zap.Int("page", pageNr), + zap.Duration("duration", time.Since(pageStart)), + zap.Int("found", len(partialOwnership.Items)), + ) c.partialOwnership = append(c.partialOwnership, partialOwnership.Items...) @@ -303,7 +323,11 @@ func (c *loadOwnedCollectiblesCommand) Run(parent context.Context) (err error) { updateMessage.Removed, updateMessage.Updated, updateMessage.Added, err = c.ownershipDB.Update(c.chainID, c.account, balances, start.Unix()) if err != nil { - log.Error("failed updating ownershipDB in loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account, "error", err) + logutils.ZapLogger().Error("failed updating ownershipDB in loadOwnedCollectiblesCommand", + zap.Uint64("chain", uint64(c.chainID)), + zap.Stringer("account", c.account), + zap.Error(err), + ) c.err = err break } @@ -337,6 +361,10 @@ func (c *loadOwnedCollectiblesCommand) Run(parent context.Context) (err error) { c.triggerEvent(EventCollectiblesOwnershipUpdateFinished, c.chainID, c.account, string(encodedMessage)) } - log.Debug("end loadOwnedCollectiblesCommand", "chain", c.chainID, "account", c.account, "in", time.Since(start)) + logutils.ZapLogger().Debug("end loadOwnedCollectiblesCommand", + zap.Uint64("chain", uint64(c.chainID)), + zap.Stringer("account", c.account), + zap.Duration("in", time.Since(start)), + ) return nil } diff --git a/services/wallet/collectibles/controller.go b/services/wallet/collectibles/controller.go index 35df24adf53..dcbeb5ccdc4 100644 --- a/services/wallet/collectibles/controller.go +++ b/services/wallet/collectibles/controller.go @@ -7,9 +7,11 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/multiaccounts/settings" "github.com/status-im/status-go/rpc/network" @@ -152,7 +154,7 @@ func (c *Controller) startPeriodicalOwnershipFetch() error { for _, addr := range addresses { err := c.startPeriodicalOwnershipFetchForAccount(common.Address(addr)) if err != nil { - log.Error("Error starting periodical collectibles fetch for accpunt", "address", addr, "error", err) + logutils.ZapLogger().Error("Error starting periodical collectibles fetch for accpunt", zap.Stringer("address", addr), zap.Error(err)) return err } } @@ -182,7 +184,7 @@ func (c *Controller) stopPeriodicalOwnershipFetch() { // Starts (or restarts) periodical fetching for the given account address for all chains func (c *Controller) startPeriodicalOwnershipFetchForAccount(address common.Address) error { - log.Debug("wallet.api.collectibles.Controller Start periodical fetching", "address", address) + logutils.ZapLogger().Debug("wallet.api.collectibles.Controller Start periodical fetching", zap.Stringer("address", address)) networks, err := c.networkManager.Get(false) if err != nil { @@ -211,7 +213,11 @@ func (c *Controller) startPeriodicalOwnershipFetchForAccount(address common.Addr // Starts (or restarts) periodical fetching for the given account address for all chains func (c *Controller) startPeriodicalOwnershipFetchForAccountAndChainID(address common.Address, chainID 
walletCommon.ChainID, delayed bool) error { - log.Debug("wallet.api.collectibles.Controller Start periodical fetching", "address", address, "chainID", chainID, "delayed", delayed) + logutils.ZapLogger().Debug("wallet.api.collectibles.Controller Start periodical fetching", + zap.Stringer("address", address), + zap.Stringer("chainID", chainID), + zap.Bool("delayed", delayed), + ) if !c.isPeriodicalOwnershipFetchRunning() { return errors.New("periodical fetch not initialized") @@ -247,7 +253,7 @@ func (c *Controller) startPeriodicalOwnershipFetchForAccountAndChainID(address c // Stop periodical fetching for the given account address for all chains func (c *Controller) stopPeriodicalOwnershipFetchForAccount(address common.Address) error { - log.Debug("wallet.api.collectibles.Controller Stop periodical fetching", "address", address) + logutils.ZapLogger().Debug("wallet.api.collectibles.Controller Stop periodical fetching", zap.Stringer("address", address)) if !c.isPeriodicalOwnershipFetchRunning() { return errors.New("periodical fetch not initialized") @@ -267,7 +273,10 @@ func (c *Controller) stopPeriodicalOwnershipFetchForAccount(address common.Addre } func (c *Controller) stopPeriodicalOwnershipFetchForAccountAndChainID(address common.Address, chainID walletCommon.ChainID) error { - log.Debug("wallet.api.collectibles.Controller Stop periodical fetching", "address", address, "chainID", chainID) + logutils.ZapLogger().Debug("wallet.api.collectibles.Controller Stop periodical fetching", + zap.Stringer("address", address), + zap.Stringer("chainID", chainID), + ) if !c.isPeriodicalOwnershipFetchRunning() { return errors.New("periodical fetch not initialized") @@ -300,14 +309,14 @@ func (c *Controller) startAccountsWatcher() { for _, address := range changedAddresses { err := c.startPeriodicalOwnershipFetchForAccount(address) if err != nil { - log.Error("Error starting periodical collectibles fetch", "address", address, "error", err) + logutils.ZapLogger().Error("Error starting periodical collectibles fetch", zap.Stringer("address", address), zap.Error(err)) } } } else if eventType == accountsevent.EventTypeRemoved { for _, address := range changedAddresses { err := c.stopPeriodicalOwnershipFetchForAccount(address) if err != nil { - log.Error("Error starting periodical collectibles fetch", "address", address, "error", err) + logutils.ZapLogger().Error("Error starting periodical collectibles fetch", zap.Stringer("address", address), zap.Error(err)) } } } @@ -370,7 +379,7 @@ func (c *Controller) startSettingsWatcher() { c.stopPeriodicalOwnershipFetch() err := c.startPeriodicalOwnershipFetch() if err != nil { - log.Error("Error starting periodical collectibles fetch", "error", err) + logutils.ZapLogger().Error("Error starting periodical collectibles fetch", zap.Error(err)) } } } @@ -393,7 +402,7 @@ func (c *Controller) refetchOwnershipIfRecentTransfer(account common.Address, ch timestamp, err := c.ownershipDB.GetOwnershipUpdateTimestamp(account, chainID) if err != nil { - log.Error("Error getting ownership update timestamp", "error", err) + logutils.ZapLogger().Error("Error getting ownership update timestamp", zap.Error(err)) return } if timestamp == InvalidTimestamp { @@ -412,7 +421,7 @@ func (c *Controller) refetchOwnershipIfRecentTransfer(account common.Address, ch err := c.startPeriodicalOwnershipFetchForAccountAndChainID(account, chainID, true) c.commandsLock.Unlock() if err != nil { - log.Error("Error starting periodical collectibles fetch", "address", account, "error", err) + 
logutils.ZapLogger().Error("Error starting periodical collectibles fetch", zap.Stringer("address", account), zap.Error(err)) } } } diff --git a/services/wallet/collectibles/manager.go b/services/wallet/collectibles/manager.go index b7685f4cda2..4ead12ddbdb 100644 --- a/services/wallet/collectibles/manager.go +++ b/services/wallet/collectibles/manager.go @@ -11,14 +11,16 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/circuitbreaker" gocommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/contracts/community-tokens/collectibles" "github.com/status-im/status-go/contracts/ierc1155" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/rpc/chain" "github.com/status-im/status-go/server" @@ -135,7 +137,7 @@ func (o *Manager) doContentTypeRequest(ctx context.Context, url string) (string, } defer func() { if err := resp.Body.Close(); err != nil { - log.Error("failed to close head request body", "err", err) + logutils.ZapLogger().Error("failed to close head request body", zap.Error(err)) } }() @@ -233,7 +235,10 @@ func (o *Manager) FetchAllAssetsByOwnerAndContractAddress(ctx context.Context, c func() ([]interface{}, error) { assetContainer, err := provider.FetchAllAssetsByOwnerAndContractAddress(ctx, chainID, owner, contractAddresses, cursor, limit) if err != nil { - log.Error("FetchAllAssetsByOwnerAndContractAddress failed for", "provider", provider.ID(), "chainID", chainID, "err", err) + logutils.ZapLogger().Error("FetchAllAssetsByOwnerAndContractAddress failed for", + zap.String("provider", provider.ID()), + zap.Stringer("chainID", chainID), + zap.Error(err)) } return []interface{}{assetContainer}, err }, getCircuitName(provider, chainID), @@ -247,7 +252,10 @@ func (o *Manager) FetchAllAssetsByOwnerAndContractAddress(ctx context.Context, c cmdRes := o.circuitBreaker.Execute(cmd) if cmdRes.Error() != nil { - log.Error("FetchAllAssetsByOwnerAndContractAddress failed for", "chainID", chainID, "err", cmdRes.Error()) + logutils.ZapLogger().Error("FetchAllAssetsByOwnerAndContractAddress failed for", + zap.Stringer("chainID", chainID), + zap.Error(cmdRes.Error()), + ) return nil, cmdRes.Error() } @@ -277,7 +285,11 @@ func (o *Manager) FetchAllAssetsByOwner(ctx context.Context, chainID walletCommo func() ([]interface{}, error) { assetContainer, err := provider.FetchAllAssetsByOwner(ctx, chainID, owner, cursor, limit) if err != nil { - log.Error("FetchAllAssetsByOwner failed for", "provider", provider.ID(), "chainID", chainID, "err", err) + logutils.ZapLogger().Error("FetchAllAssetsByOwner failed for", + zap.String("provider", provider.ID()), + zap.Stringer("chainID", chainID), + zap.Error(err), + ) } return []interface{}{assetContainer}, err }, getCircuitName(provider, chainID), @@ -291,7 +303,10 @@ func (o *Manager) FetchAllAssetsByOwner(ctx context.Context, chainID walletCommo cmdRes := o.circuitBreaker.Execute(cmd) if cmdRes.Error() != nil { - log.Error("FetchAllAssetsByOwner failed for", "chainID", chainID, "err", cmdRes.Error()) + logutils.ZapLogger().Error("FetchAllAssetsByOwner failed for", + zap.Stringer("chainID", chainID), + zap.Error(cmdRes.Error()), + ) return nil, cmdRes.Error() } @@ -372,7 +387,11 @@ func (o *Manager) fillMissingBalances(ctx context.Context, owner common.Address, balances, err := 
o.FetchERC1155Balances(ctx, owner, chainID, contractAddress, tokenIDs) if err != nil { - log.Error("FetchERC1155Balances failed", "chainID", chainID, "contractAddress", contractAddress, "err", err) + logutils.ZapLogger().Error("FetchERC1155Balances failed", + zap.Stringer("chainID", chainID), + zap.Stringer("contractAddress", contractAddress), + zap.Error(err), + ) continue } @@ -432,13 +451,21 @@ func (o *Manager) FetchMissingAssetsByCollectibleUniqueID(ctx context.Context, u fetchedAssets, err := o.fetchMissingAssetsForChainByCollectibleUniqueID(ctx, chainID, idsToFetch) if err != nil { - log.Error("FetchMissingAssetsByCollectibleUniqueID failed for", "chainID", chainID, "ids", idsToFetch, "err", err) + logutils.ZapLogger().Error("FetchMissingAssetsByCollectibleUniqueID failed for", + zap.Stringer("chainID", chainID), + zap.Any("ids", idsToFetch), + zap.Error(err), + ) return err } updatedCollectibles, err := o.processFullCollectibleData(ctx, fetchedAssets, asyncFetch) if err != nil { - log.Error("processFullCollectibleData failed for", "chainID", chainID, "len(fetchedAssets)", len(fetchedAssets), "err", err) + logutils.ZapLogger().Error("processFullCollectibleData failed for", + zap.Stringer("chainID", chainID), + zap.Int("len(fetchedAssets)", len(fetchedAssets)), + zap.Error(err), + ) return err } @@ -466,7 +493,11 @@ func (o *Manager) fetchMissingAssetsForChainByCollectibleUniqueID(ctx context.Co cmd.Add(circuitbreaker.NewFunctor(func() ([]any, error) { fetchedAssets, err := provider.FetchAssetsByCollectibleUniqueID(ctx, idsToFetch) if err != nil { - log.Error("fetchMissingAssetsForChainByCollectibleUniqueID failed for", "provider", provider.ID(), "chainID", chainID, "err", err) + logutils.ZapLogger().Error("fetchMissingAssetsForChainByCollectibleUniqueID failed", + zap.String("provider", provider.ID()), + zap.Stringer("chainID", chainID), + zap.Error(err), + ) } return []any{fetchedAssets}, err @@ -479,7 +510,10 @@ func (o *Manager) fetchMissingAssetsForChainByCollectibleUniqueID(ctx context.Co cmdRes := o.circuitBreaker.Execute(cmd) if cmdRes.Error() != nil { - log.Error("fetchMissingAssetsForChainByCollectibleUniqueID failed for", "chainID", chainID, "err", cmdRes.Error()) + logutils.ZapLogger().Error("fetchMissingAssetsForChainByCollectibleUniqueID failed for", + zap.Stringer("chainID", chainID), + zap.Error(cmdRes.Error()), + ) return nil, cmdRes.Error() } return cmdRes.Result()[0].([]thirdparty.FullCollectibleData), cmdRes.Error() @@ -518,7 +552,10 @@ func (o *Manager) FetchCollectionsDataByContractID(ctx context.Context, ids []th cmdRes := o.circuitBreaker.Execute(cmd) if cmdRes.Error() != nil { - log.Error("FetchCollectionsDataByContractID failed for", "chainID", chainID, "err", cmdRes.Error()) + logutils.ZapLogger().Error("FetchCollectionsDataByContractID failed for", + zap.Stringer("chainID", chainID), + zap.Error(cmdRes.Error()), + ) return cmdRes.Error() } @@ -563,7 +600,11 @@ func (o *Manager) FetchCollectibleOwnersByContractAddress(ctx context.Context, c cmd.Add(circuitbreaker.NewFunctor(func() ([]any, error) { res, err := provider.FetchCollectibleOwnersByContractAddress(ctx, chainID, contractAddress) if err != nil { - log.Error("FetchCollectibleOwnersByContractAddress failed for", "provider", provider.ID(), "chainID", chainID, "err", err) + logutils.ZapLogger().Error("FetchCollectibleOwnersByContractAddress failed", + zap.String("provider", provider.ID()), + zap.Stringer("chainID", chainID), + zap.Error(err), + ) } return []any{res}, err }, getCircuitName(provider, 
chainID))) @@ -575,7 +616,10 @@ func (o *Manager) FetchCollectibleOwnersByContractAddress(ctx context.Context, c cmdRes := o.circuitBreaker.Execute(cmd) if cmdRes.Error() != nil { - log.Error("FetchCollectibleOwnersByContractAddress failed for", "chainID", chainID, "err", cmdRes.Error()) + logutils.ZapLogger().Error("FetchCollectibleOwnersByContractAddress failed for", + zap.Stringer("chainID", chainID), + zap.Error(cmdRes.Error()), + ) return nil, cmdRes.Error() } return cmdRes.Result()[0].(*thirdparty.CollectibleContractOwnership), cmdRes.Error() @@ -642,7 +686,7 @@ func (o *Manager) processFullCollectibleData(ctx context.Context, assets []third // Get TokenURI if not given by provider err := o.fillTokenURI(ctx, asset) if err != nil { - log.Error("fillTokenURI failed", "err", err) + logutils.ZapLogger().Error("fillTokenURI failed", zap.Error(err)) delete(fullyFetchedAssets, asset.CollectibleData.ID.HashKey()) continue } @@ -650,7 +694,7 @@ func (o *Manager) processFullCollectibleData(ctx context.Context, assets []third // Get CommunityID if obtainable from TokenURI err = o.fillCommunityID(asset) if err != nil { - log.Error("fillCommunityID failed", "err", err) + logutils.ZapLogger().Error("fillCommunityID failed", zap.Error(err)) delete(fullyFetchedAssets, asset.CollectibleData.ID.HashKey()) continue } @@ -676,7 +720,7 @@ func (o *Manager) processFullCollectibleData(ctx context.Context, assets []third } else { err := o.fetchCommunityAssets(communityID, communityAssets) if err != nil { - log.Error("fetchCommunityAssets failed", "communityID", communityID, "err", err) + logutils.ZapLogger().Error("fetchCommunityAssets failed", zap.String("communityID", communityID), zap.Error(err)) continue } for _, asset := range communityAssets { @@ -688,7 +732,7 @@ func (o *Manager) processFullCollectibleData(ctx context.Context, assets []third for _, asset := range fullyFetchedAssets { err := o.fillAnimationMediatype(ctx, asset) if err != nil { - log.Error("fillAnimationMediatype failed", "err", err) + logutils.ZapLogger().Error("fillAnimationMediatype failed", zap.Error(err)) delete(fullyFetchedAssets, asset.CollectibleData.ID.HashKey()) continue } @@ -764,9 +808,9 @@ func (o *Manager) fillCommunityID(asset *thirdparty.FullCollectibleData) error { func (o *Manager) fetchCommunityAssets(communityID string, communityAssets []*thirdparty.FullCollectibleData) error { communityFound, err := o.communityManager.FillCollectiblesMetadata(communityID, communityAssets) if err != nil { - log.Error("FillCollectiblesMetadata failed", "communityID", communityID, "err", err) + logutils.ZapLogger().Error("FillCollectiblesMetadata failed", zap.String("communityID", communityID), zap.Error(err)) } else if !communityFound { - log.Warn("fetchCommunityAssets community not found", "communityID", communityID) + logutils.ZapLogger().Warn("fetchCommunityAssets community not found", zap.String("communityID", communityID)) } // If the community is found, we update the DB. 
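Note for reviewers: every hunk in this change applies the same mechanical pattern, so it is worth spelling out once. The old github.com/ethereum/go-ethereum/log API took alternating, loosely typed key/value arguments; zap takes one typed field per key. So "chain", c.chainID becomes zap.Uint64("chain", uint64(c.chainID)), values implementing fmt.Stringer (addresses, chain IDs, *big.Int) are passed with zap.Stringer, errors with zap.Error, and values without a better match with zap.Any. A minimal standalone sketch of the pattern follows; it builds its own development logger so it compiles on its own, whereas this codebase obtains the shared logger via logutils.ZapLogger():

package main

import (
	"math/big"

	"go.uber.org/zap"
)

func main() {
	// Standalone stand-in for logutils.ZapLogger(), so this sketch runs by itself.
	logger, _ := zap.NewDevelopment()
	defer func() { _ = logger.Sync() }()

	chainID := uint64(1)
	amount := big.NewInt(42)

	// Old style: log.Debug("fetch done", "chain", chainID, "amount", amount)
	// New style: one typed zap.Field per key.
	logger.Debug("fetch done",
		zap.Uint64("chain", chainID),
		zap.Stringer("amount", amount), // *big.Int implements fmt.Stringer
	)
}

Typed fields avoid the silent key/value pairing mistakes the old variadic API allowed and let zap encode each value without reflection in the common cases.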
@@ -785,13 +829,13 @@ func (o *Manager) fetchCommunityAssets(communityID string, communityAssets []*th err = o.collectiblesDataDB.SetData(collectiblesData, allowUpdate) if err != nil { - log.Error("collectiblesDataDB SetData failed", "communityID", communityID, "err", err) + logutils.ZapLogger().Error("collectiblesDataDB SetData failed", zap.String("communityID", communityID), zap.Error(err)) return err } err = o.collectionsDataDB.SetData(collectionsData, allowUpdate) if err != nil { - log.Error("collectionsDataDB SetData failed", "communityID", communityID, "err", err) + logutils.ZapLogger().Error("collectionsDataDB SetData failed", zap.String("communityID", communityID), zap.Error(err)) return err } @@ -799,7 +843,7 @@ func (o *Manager) fetchCommunityAssets(communityID string, communityAssets []*th if asset.CollectibleCommunityInfo != nil { err = o.collectiblesDataDB.SetCommunityInfo(asset.CollectibleData.ID, *asset.CollectibleCommunityInfo) if err != nil { - log.Error("collectiblesDataDB SetCommunityInfo failed", "communityID", communityID, "err", err) + logutils.ZapLogger().Error("collectiblesDataDB SetCommunityInfo failed", zap.String("communityID", communityID), zap.Error(err)) return err } } @@ -817,7 +861,7 @@ func (o *Manager) fetchCommunityAssetsAsync(_ context.Context, communityID strin defer gocommon.LogOnPanic() err := o.fetchCommunityAssets(communityID, communityAssets) if err != nil { - log.Error("fetchCommunityAssets failed", "communityID", communityID, "err", err) + logutils.ZapLogger().Error("fetchCommunityAssets failed", zap.String("communityID", communityID), zap.Error(err)) return } @@ -968,7 +1012,7 @@ func (o *Manager) signalUpdatedCollectiblesData(ids []thirdparty.CollectibleUniq collectibles, err := o.getCacheFullCollectibleData(pageIDs) if err != nil { - log.Error("Error getting FullCollectibleData from cache: %v", err) + logutils.ZapLogger().Error("Error getting FullCollectibleData from cache", zap.Error(err)) return } @@ -977,7 +1021,7 @@ func (o *Manager) signalUpdatedCollectiblesData(ids []thirdparty.CollectibleUniq payload, err := json.Marshal(details) if err != nil { - log.Error("Error marshaling response: %v", err) + logutils.ZapLogger().Error("Error marshaling response", zap.Error(err)) return } @@ -1008,7 +1052,11 @@ func (o *Manager) SearchCollectibles(ctx context.Context, chainID walletCommon.C container, err := provider.SearchCollectibles(ctx, chainID, collections, text, cursor, limit) if err != nil { - log.Error("FetchAllAssetsByOwner failed for", "provider", provider.ID(), "chainID", chainID, "err", err) + logutils.ZapLogger().Error("SearchCollectibles failed for", + zap.String("provider", provider.ID()), + zap.Stringer("chainID", chainID), + zap.Error(err), + ) continue } @@ -1042,7 +1090,11 @@ func (o *Manager) SearchCollections(ctx context.Context, chainID walletCommon.Ch // TODO (#13951): Be smarter about how we handle the user-entered string container, err := provider.SearchCollections(ctx, chainID, query, cursor, limit) if err != nil { - log.Error("FetchAllAssetsByOwner failed for", "provider", provider.ID(), "chainID", chainID, "err", err) + logutils.ZapLogger().Error("SearchCollections failed for", + zap.String("provider", provider.ID()), + zap.Stringer("chainID", chainID), + zap.Error(err), + ) continue } @@ -1067,7 +1119,11 @@ func (o *Manager) FetchCollectionSocialsAsync(contractID thirdparty.ContractID) socials, err := o.getOrFetchSocialsForCollection(context.Background(), contractID) if err != nil || socials == nil { - 
log.Debug("FetchCollectionSocialsAsync failed for", "chainID", contractID.ChainID, "address", contractID.Address, "err", err) + logutils.ZapLogger().Debug("FetchCollectionSocialsAsync failed for", + zap.Stringer("chainID", contractID.ChainID), + zap.Stringer("address", contractID.Address), + zap.Error(err), + ) return } @@ -1078,7 +1134,7 @@ func (o *Manager) FetchCollectionSocialsAsync(contractID thirdparty.ContractID) payload, err := json.Marshal(socialsMessage) if err != nil { - log.Error("Error marshaling response: %v", err) + logutils.ZapLogger().Error("Error marshaling response", zap.Error(err)) return } @@ -1096,7 +1152,11 @@ func (o *Manager) FetchCollectionSocialsAsync(contractID thirdparty.ContractID) func (o *Manager) getOrFetchSocialsForCollection(_ context.Context, contractID thirdparty.ContractID) (*thirdparty.CollectionSocials, error) { socials, err := o.collectionsDataDB.GetSocialsForID(contractID) if err != nil { - log.Debug("getOrFetchSocialsForCollection failed for", "chainID", contractID.ChainID, "address", contractID.Address, "err", err) + logutils.ZapLogger().Debug("getOrFetchSocialsForCollection failed for", + zap.Stringer("chainID", contractID.ChainID), + zap.Stringer("address", contractID.Address), + zap.Error(err), + ) return nil, err } if socials == nil { @@ -1116,7 +1176,11 @@ func (o *Manager) fetchSocialsForCollection(ctx context.Context, contractID thir cmd.Add(circuitbreaker.NewFunctor(func() ([]interface{}, error) { socials, err := provider.FetchCollectionSocials(ctx, contractID) if err != nil { - log.Error("FetchCollectionSocials failed for", "provider", provider.ID(), "chainID", contractID.ChainID, "err", err) + logutils.ZapLogger().Error("FetchCollectionSocials failed for", + zap.String("provider", provider.ID()), + zap.Stringer("chainID", contractID.ChainID), + zap.Error(err), + ) } return []interface{}{socials}, err }, getCircuitName(provider, contractID.ChainID))) @@ -1128,14 +1192,17 @@ func (o *Manager) fetchSocialsForCollection(ctx context.Context, contractID thir cmdRes := o.circuitBreaker.Execute(cmd) if cmdRes.Error() != nil { - log.Error("fetchSocialsForCollection failed for", "chainID", contractID.ChainID, "err", cmdRes.Error()) + logutils.ZapLogger().Error("fetchSocialsForCollection failed for", + zap.Stringer("chainID", contractID.ChainID), + zap.Error(cmdRes.Error()), + ) return nil, cmdRes.Error() } socials := cmdRes.Result()[0].(*thirdparty.CollectionSocials) err := o.collectionsDataDB.SetCollectionSocialsData(contractID, socials) if err != nil { - log.Error("Error saving socials to DB: %v", err) + logutils.ZapLogger().Error("Error saving socials to DB", zap.Error(err)) return nil, err } diff --git a/services/wallet/collectibles/service.go b/services/wallet/collectibles/service.go index 062930b2eef..2ef662c0ddf 100644 --- a/services/wallet/collectibles/service.go +++ b/services/wallet/collectibles/service.go @@ -8,10 +8,12 @@ import ( "math/big" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/rpc/network" @@ -341,12 +343,17 @@ func (s *Service) Stop() { func (s *Service) sendResponseEvent(requestID *int32, eventType walletevent.EventType, payloadObj interface{}, resErr error) { payload, err := json.Marshal(payloadObj) if err != nil { - log.Error("Error marshaling response: %v; result error: %w", err, resErr) 
+ logutils.ZapLogger().Error("Error marshaling", zap.NamedError("response", err), zap.NamedError("result", resErr)) } else { err = resErr } - log.Debug("wallet.api.collectibles.Service RESPONSE", "requestID", requestID, "eventType", eventType, "error", err, "payload.len", len(payload)) + logutils.ZapLogger().Debug("wallet.api.collectibles.Service RESPONSE", + zap.Any("requestID", requestID), + zap.String("eventType", string(eventType)), + zap.Int("payload.len", len(payload)), + zap.Error(err), + ) event := walletevent.Event{ Type: eventType, @@ -443,7 +450,7 @@ func (s *Service) onCollectiblesTransfer(account common.Address, chainID walletC } err := s.manager.SetCollectibleTransferID(account, id, transfer.ID, true) if err != nil { - log.Error("Error setting transfer ID for collectible", "error", err) + logutils.ZapLogger().Error("Error setting transfer ID for collectible", zap.Error(err)) } } } @@ -462,7 +469,7 @@ func (s *Service) lookupTransferForCollectibles(ownedCollectibles OwnedCollectib for _, id := range ownedCollectibles.ids { transfer, err := s.transferDB.GetLatestCollectibleTransfer(ownedCollectibles.account, id) if err != nil { - log.Error("Error fetching latest collectible transfer", "error", err) + logutils.ZapLogger().Error("Error fetching latest collectible transfer", zap.Error(err)) continue } if transfer != nil { @@ -472,7 +479,7 @@ func (s *Service) lookupTransferForCollectibles(ownedCollectibles OwnedCollectib } err = s.manager.SetCollectibleTransferID(ownedCollectibles.account, id, transfer.ID, false) if err != nil { - log.Error("Error setting transfer ID for collectible", "error", err) + logutils.ZapLogger().Error("Error setting transfer ID for collectible", zap.Error(err)) } } } @@ -489,7 +496,7 @@ func (s *Service) notifyCommunityCollectiblesReceived(ownedCollectibles OwnedCol collectiblesData, err := s.manager.FetchAssetsByCollectibleUniqueID(ctx, ownedCollectibles.ids, false) if err != nil { - log.Error("Error fetching collectibles data", "error", err) + logutils.ZapLogger().Error("Error fetching collectibles data", zap.Error(err)) return } diff --git a/services/wallet/common/log_parser.go b/services/wallet/common/log_parser.go index 4a5dcc1a8b7..9e9a40f8942 100644 --- a/services/wallet/common/log_parser.go +++ b/services/wallet/common/log_parser.go @@ -7,10 +7,12 @@ import ( "fmt" "math/big" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) // Type type of transaction @@ -176,18 +178,18 @@ func ParseWETHDepositLog(ethlog *types.Log) (src common.Address, amount *big.Int amount = new(big.Int) if len(ethlog.Topics) < 2 { - log.Warn("not enough topics for WETH deposit", "topics", ethlog.Topics) + logutils.ZapLogger().Warn("not enough topics for WETH deposit", zap.Stringers("topics", ethlog.Topics)) return } if len(ethlog.Topics[1]) != 32 { - log.Warn("second topic is not padded to 32 byte address", "topic", ethlog.Topics[1]) + logutils.ZapLogger().Warn("second topic is not padded to 32 byte address", zap.Stringer("topic", ethlog.Topics[1])) return } copy(src[:], ethlog.Topics[1][12:]) if len(ethlog.Data) != 32 { - log.Warn("data is not padded to 32 byte big int", "data", ethlog.Data) + logutils.ZapLogger().Warn("data is not padded to 32 byte big int", zap.Binary("data", ethlog.Data)) return } amount.SetBytes(ethlog.Data) @@ -199,18 +201,18 @@ func ParseWETHWithdrawLog(ethlog 
*types.Log) (dst common.Address, amount *big.In amount = new(big.Int) if len(ethlog.Topics) < 2 { - log.Warn("not enough topics for WETH withdraw", "topics", ethlog.Topics) + logutils.ZapLogger().Warn("not enough topics for WETH withdraw", zap.Stringers("topics", ethlog.Topics)) return } if len(ethlog.Topics[1]) != 32 { - log.Warn("second topic is not padded to 32 byte address", "topic", ethlog.Topics[1]) + logutils.ZapLogger().Warn("second topic is not padded to 32 byte address", zap.Stringer("topic", ethlog.Topics[1])) return } copy(dst[:], ethlog.Topics[1][12:]) if len(ethlog.Data) != 32 { - log.Warn("data is not padded to 32 byte big int", "data", ethlog.Data) + logutils.ZapLogger().Warn("data is not padded to 32 byte big int", zap.Binary("data", ethlog.Data)) return } amount.SetBytes(ethlog.Data) @@ -221,18 +223,18 @@ func ParseWETHWithdrawLog(ethlog *types.Log) (dst common.Address, amount *big.In func ParseErc20TransferLog(ethlog *types.Log) (from, to common.Address, amount *big.Int) { amount = new(big.Int) if len(ethlog.Topics) < erc20TransferEventIndexedParameters { - log.Warn("not enough topics for erc20 transfer", "topics", ethlog.Topics) + logutils.ZapLogger().Warn("not enough topics for erc20 transfer", zap.Stringers("topics", ethlog.Topics)) return } var err error from, to, err = getFromToAddresses(*ethlog) if err != nil { - log.Error("log_parser::ParseErc20TransferLog", err) + logutils.ZapLogger().Error("log_parser::ParseErc20TransferLog", zap.Error(err)) return } if len(ethlog.Data) != 32 { - log.Warn("data is not padded to 32 byts big int", "data", ethlog.Data) + logutils.ZapLogger().Warn("data is not padded to 32 byte big int", zap.Binary("data", ethlog.Data)) return } amount.SetBytes(ethlog.Data) @@ -243,14 +245,14 @@ func ParseErc20TransferLog(ethlog *types.Log) (from, to common.Address, amount * func ParseErc721TransferLog(ethlog *types.Log) (from, to common.Address, tokenID *big.Int) { tokenID = new(big.Int) if len(ethlog.Topics) < erc721TransferEventIndexedParameters { - log.Warn("not enough topics for erc721 transfer", "topics", ethlog.Topics) + logutils.ZapLogger().Warn("not enough topics for erc721 transfer", zap.Stringers("topics", ethlog.Topics)) return } var err error from, to, err = getFromToAddresses(*ethlog) if err != nil { - log.Error("log_parser::ParseErc721TransferLog", err) + logutils.ZapLogger().Error("log_parser::ParseErc721TransferLog", zap.Error(err)) return } tokenID.SetBytes(ethlog.Topics[3][:]) @@ -277,7 +279,7 @@ func checkTopicsLength(ethlog types.Log, startIdx, endIdx int) (err error) { for i := startIdx; i < endIdx; i++ { if len(ethlog.Topics[i]) != common.HashLength { err = fmt.Errorf("topic %d is not padded to %d byte address, topic=%s", i, common.HashLength, ethlog.Topics[i]) - log.Error("log_parser::checkTopicsLength", err) + logutils.ZapLogger().Error("log_parser::checkTopicsLength", zap.Error(err)) return } } @@ -340,7 +342,7 @@ func ParseTransferLog(ethlog types.Log) (from, to common.Address, txIDs []common func ParseErc1155TransferLog(ethlog *types.Log, evType EventType) (operator, from, to common.Address, ids, amounts []*big.Int, err error) { if len(ethlog.Topics) < erc1155TransferEventIndexedParameters { err = fmt.Errorf("not enough topics for erc1155 transfer %s, %v", "topics", ethlog.Topics) - log.Error("log_parser::ParseErc1155TransferLog", "err", err) + logutils.ZapLogger().Error("log_parser::ParseErc1155TransferLog", zap.Error(err)) return } @@ -353,20 +355,23 @@ func ParseErc1155TransferLog(ethlog *types.Log, evType EventType) 
(operator, fro copy(operator[:], ethlog.Topics[1][addressIdx:]) from, to, err = getFromToAddresses(*ethlog) if err != nil { - log.Error("log_parser::ParseErc1155TransferLog", "err", err) + logutils.ZapLogger().Error("log_parser::ParseErc1155TransferLog", zap.Error(err)) return } if len(ethlog.Data) == 0 || len(ethlog.Data)%(common.HashLength*2) != 0 { err = fmt.Errorf("data is not padded to 64 bytes %s, %v", "data", ethlog.Data) - log.Error("log_parser::ParseErc1155TransferLog", "err", err) + logutils.ZapLogger().Error("log_parser::ParseErc1155TransferLog", zap.Error(err)) return } if evType == Erc1155TransferSingleEventType { ids = append(ids, new(big.Int).SetBytes(ethlog.Data[:common.HashLength])) amounts = append(amounts, new(big.Int).SetBytes(ethlog.Data[common.HashLength:])) - log.Debug("log_parser::ParseErc1155TransferSingleLog", "ids", ids, "amounts", amounts) + logutils.ZapLogger().Debug("log_parser::ParseErc1155TransferSingleLog", + zap.Any("ids", ids), + zap.Any("amounts", amounts), + ) } else { // idTypeSize := new(big.Int).SetBytes(ethlog.Data[:common.HashLength]).Uint64() // Left for knowledge // valueTypeSize := new(big.Int).SetBytes(ethlog.Data[common.HashLength : common.HashLength*2]).Uint64() // Left for knowledge @@ -380,14 +385,17 @@ func ParseErc1155TransferLog(ethlog *types.Log, evType EventType) (operator, fro if idsArraySize != valuesArraySize { err = fmt.Errorf("ids and values sizes don't match %d, %d", idsArraySize, valuesArraySize) - log.Error("log_parser::ParseErc1155TransferBatchLog", "err", err) + logutils.ZapLogger().Error("log_parser::ParseErc1155TransferBatchLog", zap.Error(err)) return } initialOffset = initialOffset + int(idsArraySize+1)*common.HashLength for i := 0; i < int(valuesArraySize); i++ { amounts = append(amounts, new(big.Int).SetBytes(ethlog.Data[initialOffset+i*common.HashLength:initialOffset+(i+1)*common.HashLength])) - log.Debug("log_parser::ParseErc1155TransferBatchLog", "id", ids[i], "amount", amounts[i]) + logutils.ZapLogger().Debug("log_parser::ParseErc1155TransferBatchLog", + zap.Any("id", ids[i]), + zap.Any("amount", amounts[i]), + ) } } @@ -408,7 +416,7 @@ func ParseUniswapV2Log(ethlog *types.Log) (pairAddress common.Address, from comm pairAddress = ethlog.Address from, to, err = getFromToAddresses(*ethlog) if err != nil { - log.Error("log_parser::ParseUniswapV2Log", err) + logutils.ZapLogger().Error("log_parser::ParseUniswapV2Log", zap.Error(err)) return } if len(ethlog.Data) != 32*4 { @@ -448,7 +456,7 @@ func ParseUniswapV3Log(ethlog *types.Log) (poolAddress common.Address, sender co poolAddress = ethlog.Address sender, recipient, err = getFromToAddresses(*ethlog) if err != nil { - log.Error("log_parser::ParseUniswapV3Log", err) + logutils.ZapLogger().Error("log_parser::ParseUniswapV3Log", zap.Error(err)) return } if len(ethlog.Data) != 32*5 { @@ -509,7 +517,7 @@ func ParseHopBridgeTransferFromL1CompletedLog(ethlog *types.Log) (recipient comm recipient, relayer, err = getFromToAddresses(*ethlog) if err != nil { - log.Error("log_parser::ParseHopBridgeTransferFromL1CompletedLog", err) + logutils.ZapLogger().Error("log_parser::ParseHopBridgeTransferFromL1CompletedLog", zap.Error(err)) return } diff --git a/services/wallet/common/mock/feed_subscription.go b/services/wallet/common/mock/feed_subscription.go index b508ded5fe3..3842b1911ea 100644 --- a/services/wallet/common/mock/feed_subscription.go +++ b/services/wallet/common/mock/feed_subscription.go @@ -4,6 +4,7 @@ import ( "time" "github.com/ethereum/go-ethereum/event" + 
"github.com/status-im/status-go/common" "github.com/status-im/status-go/services/wallet/walletevent" ) @@ -20,6 +21,7 @@ func NewFeedSubscription(feed *event.Feed) *FeedSubscription { subscription := feed.Subscribe(events) go func() { + defer common.LogOnPanic() <-done subscription.Unsubscribe() close(events) diff --git a/services/wallet/community/manager.go b/services/wallet/community/manager.go index 3d628eaa2e1..8253b3cb8f1 100644 --- a/services/wallet/community/manager.go +++ b/services/wallet/community/manager.go @@ -4,9 +4,11 @@ import ( "database/sql" "encoding/json" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/server" "github.com/status-im/status-go/services/wallet/thirdparty" "github.com/status-im/status-go/services/wallet/walletevent" @@ -72,7 +74,7 @@ func (cm *Manager) fetchCommunityInfo(communityID string, fetcher func() (*third if err != nil { dbErr := cm.setCommunityInfo(communityID, nil) if dbErr != nil { - log.Error("SetCommunityInfo failed", "communityID", communityID, "err", dbErr) + logutils.ZapLogger().Error("SetCommunityInfo failed", zap.String("communityID", communityID), zap.Error(dbErr)) } return nil, err } @@ -91,7 +93,7 @@ func (cm *Manager) FetchCommunityMetadataAsync(communityID string) { defer gocommon.LogOnPanic() communityInfo, err := cm.FetchCommunityMetadata(communityID) if err != nil { - log.Error("FetchCommunityInfo failed", "communityID", communityID, "err", err) + logutils.ZapLogger().Error("FetchCommunityInfo failed", zap.String("communityID", communityID), zap.Error(err)) } cm.signalUpdatedCommunityMetadata(communityID, communityInfo) }() @@ -126,7 +128,7 @@ func (cm *Manager) signalUpdatedCommunityMetadata(communityID string, communityI payload, err := json.Marshal(data) if err != nil { - log.Error("Error marshaling response: %v", err) + logutils.ZapLogger().Error("Error marshaling response", zap.Error(err)) return } diff --git a/services/wallet/history/balance.go b/services/wallet/history/balance.go index 0d0c010ae65..b6da0c9c84d 100644 --- a/services/wallet/history/balance.go +++ b/services/wallet/history/balance.go @@ -7,9 +7,11 @@ import ( "math/big" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) const genesisTimestamp = 1438269988 @@ -59,7 +61,12 @@ func NewBalance(db *BalanceDB) *Balance { // get returns the balance history for the given address from the given timestamp till now func (b *Balance) get(ctx context.Context, chainID uint64, currency string, addresses []common.Address, fromTimestamp uint64) ([]*entry, error) { - log.Debug("Getting balance history", "chainID", chainID, "currency", currency, "address", addresses, "fromTimestamp", fromTimestamp) + logutils.ZapLogger().Debug("Getting balance history", + zap.Uint64("chainID", chainID), + zap.String("currency", currency), + zap.Stringers("address", addresses), + zap.Uint64("fromTimestamp", fromTimestamp), + ) cached, err := b.db.getNewerThan(&assetIdentity{chainID, addresses, currency}, fromTimestamp) if err != nil { @@ -70,7 +77,12 @@ func (b *Balance) get(ctx context.Context, chainID uint64, currency string, addr } func (b *Balance) addEdgePoints(chainID uint64, currency string, addresses []common.Address, fromTimestamp, toTimestamp uint64, data 
[]*entry) (res []*entry, err error) { - log.Debug("Adding edge points", "chainID", chainID, "currency", currency, "address", addresses, "fromTimestamp", fromTimestamp) + logutils.ZapLogger().Debug("Adding edge points", + zap.Uint64("chainID", chainID), + zap.String("currency", currency), + zap.Stringers("address", addresses), + zap.Uint64("fromTimestamp", fromTimestamp), + ) if len(addresses) == 0 { return nil, errors.New("addresses must not be empty") @@ -153,7 +165,13 @@ func timestampBoundaries(fromTimestamp, toTimestamp uint64, data []*entry) (firs } func addPaddingPoints(currency string, addresses []common.Address, toTimestamp uint64, data []*entry, limit int) (res []*entry, err error) { - log.Debug("addPaddingPoints start", "currency", currency, "address", addresses, "len(data)", len(data), "data", data, "limit", limit) + logutils.ZapLogger().Debug("addPaddingPoints start", + zap.String("currency", currency), + zap.Stringers("address", addresses), + zap.Int("len(data)", len(data)), + zap.Any("data", data), + zap.Int("limit", limit), + ) if len(data) < 2 { // Edge points must be added separately during the previous step return nil, errors.New("slice is empty") @@ -192,15 +210,27 @@ func addPaddingPoints(currency string, addresses []common.Address, toTimestamp u } res[index] = entry - log.Debug("Added padding point", "entry", entry, "timestamp", paddingTimestamp, "i", i, "j", j, "index", index) + logutils.ZapLogger().Debug("Added padding point", + zap.Stringer("entry", entry), + zap.Int64("timestamp", paddingTimestamp), + zap.Int("i", i), + zap.Int("j", j), + zap.Int("index", index), + ) i++ } else if paddingTimestamp >= data[j].timestamp { - log.Debug("Kept real point", "entry", data[j], "timestamp", paddingTimestamp, "i", i, "j", j, "index", index) + logutils.ZapLogger().Debug("Kept real point", + zap.Any("entry", data[j]), + zap.Int64("timestamp", paddingTimestamp), + zap.Int("i", i), + zap.Int("j", j), + zap.Int("index", index), + ) j++ } } - log.Debug("addPaddingPoints end", "len(res)", len(res)) + logutils.ZapLogger().Debug("addPaddingPoints end", zap.Int("len(res)", len(res))) return res, nil } diff --git a/services/wallet/history/balance_db.go b/services/wallet/history/balance_db.go index 5be59408487..e6fc6f69c75 100644 --- a/services/wallet/history/balance_db.go +++ b/services/wallet/history/balance_db.go @@ -6,8 +6,10 @@ import ( "fmt" "math/big" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/bigint" ) @@ -57,7 +59,7 @@ func (e *entry) String() string { } func (b *BalanceDB) add(entry *entry) error { - log.Debug("Adding entry to balance_history", "entry", entry) + logutils.ZapLogger().Debug("Adding entry to balance_history", zap.Stringer("entry", entry)) _, err := b.db.Exec("INSERT OR IGNORE INTO balance_history (chain_id, address, currency, block, timestamp, balance) VALUES (?, ?, ?, ?, ?, ?)", entry.chainID, entry.address, entry.tokenSymbol, (*bigint.SQLBigInt)(entry.block), entry.timestamp, (*bigint.SQLBigIntBytes)(entry.balance)) return err diff --git a/services/wallet/history/service.go b/services/wallet/history/service.go index 457f9fd4dd1..670d04dbeb3 100644 --- a/services/wallet/history/service.go +++ b/services/wallet/history/service.go @@ -11,12 +11,14 @@ import ( "sort" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" 
"github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" statustypes "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/params" statusrpc "github.com/status-im/status-go/rpc" @@ -102,7 +104,7 @@ func (s *Service) triggerEvent(eventType walletevent.EventType, account statusty } func (s *Service) Start() { - log.Debug("Starting balance history service") + logutils.ZapLogger().Debug("Starting balance history service") s.startTransfersWatcher() s.startAccountWatcher() @@ -122,7 +124,12 @@ func (s *Service) Start() { } func (s *Service) mergeChainsBalances(chainIDs []uint64, addresses []common.Address, tokenSymbol string, fromTimestamp uint64, data map[uint64][]*entry) ([]*DataPoint, error) { - log.Debug("Merging balances", "address", addresses, "tokenSymbol", tokenSymbol, "fromTimestamp", fromTimestamp, "len(data)", len(data)) + logutils.ZapLogger().Debug("Merging balances", + zap.Stringers("address", addresses), + zap.String("tokenSymbol", tokenSymbol), + zap.Uint64("fromTimestamp", fromTimestamp), + zap.Int("len(data)", len(data)), + ) toTimestamp := uint64(time.Now().UTC().Unix()) allData := make([]*entry, 0) @@ -246,7 +253,13 @@ func appendPointToSlice(slice []*DataPoint, point *DataPoint) []*DataPoint { // GetBalanceHistory returns token count balance func (s *Service) GetBalanceHistory(ctx context.Context, chainIDs []uint64, addresses []common.Address, tokenSymbol string, currencySymbol string, fromTimestamp uint64) ([]*ValuePoint, error) { - log.Debug("GetBalanceHistory", "chainIDs", chainIDs, "address", addresses, "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "fromTimestamp", fromTimestamp) + logutils.ZapLogger().Debug("GetBalanceHistory", + zap.Uint64s("chainIDs", chainIDs), + zap.Stringers("address", addresses), + zap.String("tokenSymbol", tokenSymbol), + zap.String("currencySymbol", currencySymbol), + zap.Uint64("fromTimestamp", fromTimestamp), + ) chainDataMap := make(map[uint64][]*entry) for _, chainID := range chainIDs { @@ -292,13 +305,22 @@ func (s *Service) dataPointsToValuePoints(chainIDs []uint64, tokenSymbol string, if err != nil { err := s.exchange.FetchAndCacheMissingRates(tokenSymbol, currencySymbol) if err != nil { - log.Error("Error fetching exchange rates", "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "err", err) + logutils.ZapLogger().Error("Error fetching exchange rates", + zap.String("tokenSymbol", tokenSymbol), + zap.String("currencySymbol", currencySymbol), + zap.Error(err), + ) return nil, err } lastDayValue, err = s.exchange.GetExchangeRateForDay(tokenSymbol, currencySymbol, lastDayTime) if err != nil { - log.Error("Exchange rate missing for", "tokenSymbol", tokenSymbol, "currencySymbol", currencySymbol, "lastDayTime", lastDayTime, "err", err) + logutils.ZapLogger().Error("Exchange rate missing for", + zap.String("tokenSymbol", tokenSymbol), + zap.String("currencySymbol", currencySymbol), + zap.Time("lastDayTime", lastDayTime), + zap.Error(err), + ) return nil, err } } @@ -318,13 +340,20 @@ func (s *Service) dataPointsToValuePoints(chainIDs []uint64, tokenSymbol string, if lastDayValue > 0 { dayValue = lastDayValue } else { - log.Warn("Exchange rate missing for", "dayTime", dayTime, "err", err) + logutils.ZapLogger().Warn("Exchange rate missing for", + zap.Time("dayTime", dayTime), + zap.Error(err), + ) continue } } else { dayValue, err = 
s.exchange.GetExchangeRateForDay(tokenSymbol, currencySymbol, dayTime) if err != nil { - log.Warn("Exchange rate missing for", "dayTime", dayTime, "err", err) + logutils.ZapLogger().Warn( + "Exchange rate missing for", + zap.Time("dayTime", dayTime), + zap.Error(err), + ) continue } } @@ -370,7 +399,7 @@ func tokenToValue(tokenCount *big.Int, mainDenominationValue float32, weisInOneM // // expects ctx to have cancellation support and processing to be cancelled by the caller func (s *Service) updateBalanceHistory(ctx context.Context) error { - log.Debug("updateBalanceHistory started") + logutils.ZapLogger().Debug("updateBalanceHistory started") addresses, err := s.accountsDB.GetWalletAddresses() if err != nil { @@ -398,15 +427,27 @@ func (s *Service) updateBalanceHistory(ctx context.Context) error { entries, err := s.balance.db.getEntriesWithoutBalances(network.ChainID, common.Address(address)) if err != nil { - log.Error("Error getting blocks without balances", "chainID", network.ChainID, "address", address.String(), "err", err) + logutils.ZapLogger().Error("Error getting blocks without balances", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Error(err), + ) return err } - log.Debug("Blocks without balances", "chainID", network.ChainID, "address", address.String(), "entries", entries) + logutils.ZapLogger().Debug("Blocks without balances", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Any("entries", entries), + ) client, err := s.rpcClient.EthClient(network.ChainID) if err != nil { - log.Error("Error getting client", "chainID", network.ChainID, "address", address.String(), "err", err) + logutils.ZapLogger().Error("Error getting client", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Error(err), + ) return err } @@ -418,7 +459,7 @@ func (s *Service) updateBalanceHistory(ctx context.Context) error { s.triggerEvent(EventBalanceHistoryUpdateFinished, address, "") } - log.Debug("updateBalanceHistory finished") + logutils.ZapLogger().Debug("updateBalanceHistory finished") return nil } @@ -429,12 +470,22 @@ func (s *Service) addEntriesToDB(ctx context.Context, client chain.ClientInterfa if (entry.tokenAddress == common.Address{}) { // Check in cache balance = s.balanceCache.GetBalance(common.Address(address), network.ChainID, entry.block) - log.Debug("Balance from cache", "chainID", network.ChainID, "address", address.String(), "block", entry.block, "balance", balance) + logutils.ZapLogger().Debug("Balance from cache", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Uint64("block", entry.block.Uint64()), + zap.Stringer("balance", balance), + ) if balance == nil { balance, err = client.BalanceAt(ctx, common.Address(address), entry.block) if err != nil { - log.Error("Error getting balance", "chainID", network.ChainID, "address", address.String(), "err", err, "unwrapped", errors.Unwrap(err)) + logutils.ZapLogger().Error("Error getting balance", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Error(err), + zap.NamedError("unwrapped", errors.Unwrap(err)), + ) return err } time.Sleep(50 * time.Millisecond) // TODO Remove this sleep after fixing exceeding rate limit @@ -444,7 +495,11 @@ func (s *Service) addEntriesToDB(ctx context.Context, client chain.ClientInterfa // Check token first if it is supported token := s.tokenManager.FindTokenByAddress(network.ChainID, entry.tokenAddress) if token == nil { - log.Warn("Token 
not found", "chainID", network.ChainID, "address", address.String(), "tokenAddress", entry.tokenAddress.String()) + logutils.ZapLogger().Warn("Token not found", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Stringer("tokenAddress", entry.tokenAddress), + ) // TODO Add "supported=false" flag to such tokens to avoid checking them again and again continue // Skip token that we don't have symbol for. For example we don't have tokens in store for sepolia optimism } else { @@ -453,10 +508,20 @@ func (s *Service) addEntriesToDB(ctx context.Context, client chain.ClientInterfa // Check balance for token balance, err = s.tokenManager.GetTokenBalanceAt(ctx, client, common.Address(address), entry.tokenAddress, entry.block) - log.Debug("Balance from token manager", "chainID", network.ChainID, "address", address.String(), "block", entry.block, "balance", balance) + logutils.ZapLogger().Debug("Balance from token manager", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Uint64("block", entry.block.Uint64()), + zap.Stringer("balance", balance), + ) if err != nil { - log.Error("Error getting token balance", "chainID", network.ChainID, "address", address.String(), "tokenAddress", entry.tokenAddress.String(), "err", err) + logutils.ZapLogger().Error("Error getting token balance", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Stringer("tokenAddress", entry.tokenAddress), + zap.Error(err), + ) return err } } @@ -464,7 +529,11 @@ func (s *Service) addEntriesToDB(ctx context.Context, client chain.ClientInterfa entry.balance = balance err = s.balance.db.add(entry) if err != nil { - log.Error("Error adding balance", "chainID", network.ChainID, "address", address.String(), "err", err) + logutils.ZapLogger().Error("Error adding balance", + zap.Uint64("chainID", network.ChainID), + zap.Stringer("address", address), + zap.Error(err), + ) return err } } @@ -478,17 +547,24 @@ func (s *Service) startTransfersWatcher() { } transferLoadedCb := func(chainID uint64, addresses []common.Address, block *big.Int) { - log.Debug("Balance history watcher: transfer loaded:", "chainID", chainID, "addresses", addresses, "block", block.Uint64()) + logutils.ZapLogger().Debug("Balance history watcher: transfer loaded:", + zap.Uint64("chainID", chainID), + zap.Stringers("addresses", addresses), + zap.Uint64("block", block.Uint64()), + ) client, err := s.rpcClient.EthClient(chainID) if err != nil { - log.Error("Error getting client", "chainID", chainID, "err", err) + logutils.ZapLogger().Error("Error getting client", + zap.Uint64("chainID", chainID), + zap.Error(err), + ) return } network := s.networkManager.Find(chainID) if network == nil { - log.Error("Network not found", "chainID", chainID) + logutils.ZapLogger().Error("Network not found", zap.Uint64("chainID", chainID)) return } @@ -497,22 +573,37 @@ func (s *Service) startTransfersWatcher() { for _, address := range addresses { transfers, err := transferDB.GetTransfersByAddressAndBlock(chainID, address, block, 1500) // 1500 is quite arbitrary and far from real, but should be enough to cover all transfers in a block if err != nil { - log.Error("Error getting transfers", "chainID", chainID, "address", address.String(), "err", err) + logutils.ZapLogger().Error("Error getting transfers", + zap.Uint64("chainID", chainID), + zap.Stringer("address", address), + zap.Error(err), + ) continue } if len(transfers) == 0 { - log.Debug("No transfers found", "chainID", chainID, "address", 
address.String(), "block", block.Uint64()) + logutils.ZapLogger().Debug("No transfers found", + zap.Uint64("chainID", chainID), + zap.Stringer("address", address), + zap.Uint64("block", block.Uint64()), + ) continue } entries := transfersToEntries(address, block, transfers) // TODO Remove address and block after testing that they match unique := removeDuplicates(entries) - log.Debug("Entries after filtering", "entries", entries, "unique", unique) + logutils.ZapLogger().Debug("Entries after filtering", + zap.Any("entries", entries), + zap.Any("unique", unique), + ) err = s.addEntriesToDB(s.serviceContext, client, network, statustypes.Address(address), unique) if err != nil { - log.Error("Error adding entries to DB", "chainID", chainID, "address", address.String(), "err", err) + logutils.ZapLogger().Error("Error adding entries to DB", + zap.Uint64("chainID", chainID), + zap.Stringer("address", address), + zap.Error(err), + ) continue } @@ -588,7 +679,10 @@ func (s *Service) onAccountsChanged(changedAddresses []common.Address, eventType for _, address := range changedAddresses { err := s.balance.db.removeBalanceHistory(address) if err != nil { - log.Error("Error removing balance history", "address", address, "err", err) + logutils.ZapLogger().Error("Error removing balance history", + zap.String("address", address.String()), + zap.Error(err), + ) } } } diff --git a/services/wallet/history/transfers_watcher.go b/services/wallet/history/transfers_watcher.go index 174e8341b9d..cfa879c1c04 100644 --- a/services/wallet/history/transfers_watcher.go +++ b/services/wallet/history/transfers_watcher.go @@ -4,9 +4,11 @@ import ( "context" "math/big" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/transfer" "github.com/status-im/status-go/services/wallet/walletevent" @@ -64,7 +66,7 @@ func watch(ctx context.Context, feed *event.Feed, callback TransfersLoadedCb) er return nil case err := <-sub.Err(): if err != nil { - log.Error("history: transfers watcher subscription failed", "error", err) + logutils.ZapLogger().Error("history: transfers watcher subscription failed", zap.Error(err)) } case ev := <-ch: if ev.Type == transfer.EventNewTransfers { diff --git a/services/wallet/market/market.go b/services/wallet/market/market.go index bfea99d5ac8..1a853c17293 100644 --- a/services/wallet/market/market.go +++ b/services/wallet/market/market.go @@ -5,11 +5,13 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/circuitbreaker" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/thirdparty" "github.com/status-im/status-go/services/wallet/walletevent" ) @@ -101,7 +103,7 @@ func (pm *Manager) makeCall(providers []thirdparty.MarketDataProvider, f func(pr pm.setIsConnected(result.Error() == nil) if result.Error() != nil { - log.Error("Error fetching prices", "error", result.Error()) + logutils.ZapLogger().Error("Error fetching prices", zap.Error(result.Error())) return nil, result.Error() } @@ -114,7 +116,7 @@ func (pm *Manager) FetchHistoricalDailyPrices(symbol string, currency string, li }) if err != nil { - log.Error("Error fetching prices", "error", err) + logutils.ZapLogger().Error("Error 
fetching prices", zap.Error(err)) return nil, err } @@ -128,7 +130,7 @@ func (pm *Manager) FetchHistoricalHourlyPrices(symbol string, currency string, l }) if err != nil { - log.Error("Error fetching prices", "error", err) + logutils.ZapLogger().Error("Error fetching prices", zap.Error(err)) return nil, err } @@ -142,7 +144,7 @@ func (pm *Manager) FetchTokenMarketValues(symbols []string, currency string) (ma }) if err != nil { - log.Error("Error fetching prices", "error", err) + logutils.ZapLogger().Error("Error fetching prices", zap.Error(err)) return nil, err } @@ -231,7 +233,7 @@ func (pm *Manager) FetchTokenDetails(symbols []string) (map[string]thirdparty.To }) if err != nil { - log.Error("Error fetching prices", "error", err) + logutils.ZapLogger().Error("Error fetching prices", zap.Error(err)) return nil, err } @@ -258,7 +260,7 @@ func (pm *Manager) FetchPrices(symbols []string, currencies []string) (map[strin }) if err != nil { - log.Error("Error fetching prices", "error", err) + logutils.ZapLogger().Error("Error fetching prices", zap.Error(err)) return nil, err } diff --git a/services/wallet/onramp/on_ramp.go b/services/wallet/onramp/on_ramp.go index 81553cf91ed..3d6794839eb 100644 --- a/services/wallet/onramp/on_ramp.go +++ b/services/wallet/onramp/on_ramp.go @@ -4,7 +4,9 @@ import ( "context" "errors" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + + "github.com/status-im/status-go/logutils" ) type Manager struct { @@ -22,7 +24,7 @@ func (c *Manager) GetProviders(ctx context.Context) ([]CryptoOnRamp, error) { for _, provider := range c.providers { cryptoOnRamp, err := provider.GetCryptoOnRamp(ctx) if err != nil { - log.Error("failed to get crypto on ramp", "id", provider.ID(), "error", err) + logutils.ZapLogger().Error("failed to get crypto on ramp", zap.String("id", provider.ID()), zap.Error(err)) continue } diff --git a/services/wallet/reader.go b/services/wallet/reader.go index b0c62551a21..e0c20a898b2 100644 --- a/services/wallet/reader.go +++ b/services/wallet/reader.go @@ -7,13 +7,14 @@ import ( "sync" "time" + "go.uber.org/zap" "golang.org/x/exp/maps" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc/chain" "github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/market" @@ -269,7 +270,7 @@ func (r *Reader) FetchOrGetCachedWalletBalances(ctx context.Context, clients map if needFetch { _, err := r.FetchBalances(ctx, clients, addresses) if err != nil { - log.Error("FetchOrGetCachedWalletBalances error", "err", err) + logutils.ZapLogger().Error("FetchOrGetCachedWalletBalances error", zap.Error(err)) } } @@ -334,7 +335,7 @@ func tokensToBalancesPerChain(cachedTokens map[common.Address][]token.StorageTok func (r *Reader) fetchBalances(ctx context.Context, clients map[uint64]chain.ClientInterface, addresses []common.Address, tokenAddresses []common.Address) (map[uint64]map[common.Address]map[common.Address]*hexutil.Big, error) { latestBalances, err := r.tokenManager.GetBalancesByChain(ctx, clients, addresses, tokenAddresses) if err != nil { - log.Error("tokenManager.GetBalancesByChain error", "err", err) + logutils.ZapLogger().Error("tokenManager.GetBalancesByChain error", zap.Error(err)) return nil, err } @@ -381,7 +382,7 @@ func toChainBalance( func (r *Reader) 
getBalance1DayAgo(balance *token.ChainBalance, dayAgoTimestamp int64, symbol string, address common.Address) (*big.Int, error) { balance1DayAgo, err := r.tokenManager.GetTokenHistoricalBalance(address, balance.ChainID, symbol, dayAgoTimestamp) if err != nil { - log.Error("tokenManager.GetTokenHistoricalBalance error", "err", err) + logutils.ZapLogger().Error("tokenManager.GetTokenHistoricalBalance error", zap.Error(err)) return nil, err } @@ -489,7 +490,7 @@ func (r *Reader) GetWalletToken(ctx context.Context, clients map[uint64]chain.Cl group.Add(func(parent context.Context) error { prices, err = r.marketManager.GetOrFetchPrices(tokenSymbols, currencies, market.MaxAgeInSecondsForBalances) if err != nil { - log.Info("marketManager.GetOrFetchPrices err", err) + logutils.ZapLogger().Info("marketManager.GetOrFetchPrices", zap.Error(err)) } return nil }) @@ -497,7 +498,7 @@ func (r *Reader) GetWalletToken(ctx context.Context, clients map[uint64]chain.Cl group.Add(func(parent context.Context) error { tokenDetails, err = r.marketManager.FetchTokenDetails(tokenSymbols) if err != nil { - log.Info("marketManager.FetchTokenDetails err", err) + logutils.ZapLogger().Info("marketManager.FetchTokenDetails", zap.Error(err)) } return nil }) @@ -505,7 +506,7 @@ func (r *Reader) GetWalletToken(ctx context.Context, clients map[uint64]chain.Cl group.Add(func(parent context.Context) error { tokenMarketValues, err = r.marketManager.GetOrFetchTokenMarketValues(tokenSymbols, currency, market.MaxAgeInSecondsForBalances) if err != nil { - log.Info("marketManager.GetOrFetchTokenMarketValues err", err) + logutils.ZapLogger().Info("marketManager.GetOrFetchTokenMarketValues", zap.Error(err)) } return nil }) @@ -598,7 +599,7 @@ func (r *Reader) FetchBalances(ctx context.Context, clients map[uint64]chain.Cli tokenAddresses := getTokenAddresses(allTokens) balances, err := r.fetchBalances(ctx, clients, addresses, tokenAddresses) if err != nil { - log.Error("failed to update balances", "err", err) + logutils.ZapLogger().Error("failed to update balances", zap.Error(err)) return nil, err } @@ -611,7 +612,7 @@ func (r *Reader) FetchBalances(ctx context.Context, clients map[uint64]chain.Cli err = r.persistence.SaveTokens(tokens) if err != nil { - log.Error("failed to save tokens", "err", err) // Do not return error, as it is not critical + logutils.ZapLogger().Error("failed to save tokens", zap.Error(err)) // Do not return error, as it is not critical } r.updateTokenUpdateTimestamp(addresses) diff --git a/services/wallet/errors.go b/services/wallet/routeexecution/errors.go similarity index 91% rename from services/wallet/errors.go rename to services/wallet/routeexecution/errors.go index 77cd8bbe122..d33d4c8f7ef 100644 --- a/services/wallet/errors.go +++ b/services/wallet/routeexecution/errors.go @@ -1,4 +1,4 @@ -package wallet +package routeexecution import ( "github.com/status-im/status-go/errors" diff --git a/services/wallet/routeexecution/manager.go b/services/wallet/routeexecution/manager.go new file mode 100644 index 00000000000..9d73c584906 --- /dev/null +++ b/services/wallet/routeexecution/manager.go @@ -0,0 +1,204 @@ +package routeexecution + +import ( + "context" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/status-im/status-go/eth-node/types" + + status_common "github.com/status-im/status-go/common" + statusErrors "github.com/status-im/status-go/errors" + "github.com/status-im/status-go/services/wallet/requests" + "github.com/status-im/status-go/services/wallet/responses" + 
"github.com/status-im/status-go/services/wallet/router" + "github.com/status-im/status-go/services/wallet/router/pathprocessor" + "github.com/status-im/status-go/services/wallet/router/sendtype" + "github.com/status-im/status-go/services/wallet/transfer" + "github.com/status-im/status-go/signal" +) + +type Manager struct { + router *router.Router + transactionManager *transfer.TransactionManager + transferController *transfer.Controller +} + +func NewManager(router *router.Router, transactionManager *transfer.TransactionManager, transferController *transfer.Controller) *Manager { + return &Manager{ + router: router, + transactionManager: transactionManager, + transferController: transferController, + } +} + +func (m *Manager) BuildTransactionsFromRoute(ctx context.Context, buildInputParams *requests.RouterBuildTransactionsParams) { + go func() { + defer status_common.LogOnPanic() + + m.router.StopSuggestedRoutesAsyncCalculation() + + var err error + response := &responses.RouterTransactionsForSigning{ + SendDetails: &responses.SendDetails{ + Uuid: buildInputParams.Uuid, + }, + } + + defer func() { + if err != nil { + m.transactionManager.ClearLocalRouterTransactionsData() + err = statusErrors.CreateErrorResponseFromError(err) + response.SendDetails.ErrorResponse = err.(*statusErrors.ErrorResponse) + } + signal.SendWalletEvent(signal.SignRouterTransactions, response) + }() + + route, routeInputParams := m.router.GetBestRouteAndAssociatedInputParams() + if routeInputParams.Uuid != buildInputParams.Uuid { + // should never be here + err = ErrCannotResolveRouteId + return + } + + updateFields(response.SendDetails, routeInputParams) + + // notify client that sending transactions started (has 3 steps, building txs, signing txs, sending txs) + signal.SendWalletEvent(signal.RouterSendingTransactionsStarted, response.SendDetails) + + response.SigningDetails, err = m.transactionManager.BuildTransactionsFromRoute( + route, + m.router.GetPathProcessors(), + transfer.BuildRouteExtraParams{ + AddressFrom: routeInputParams.AddrFrom, + AddressTo: routeInputParams.AddrTo, + Username: routeInputParams.Username, + PublicKey: routeInputParams.PublicKey, + PackID: routeInputParams.PackID.ToInt(), + SlippagePercentage: buildInputParams.SlippagePercentage, + }, + ) + }() +} + +func (m *Manager) SendRouterTransactionsWithSignatures(ctx context.Context, sendInputParams *requests.RouterSendTransactionsParams) { + go func() { + defer status_common.LogOnPanic() + + var ( + err error + routeInputParams requests.RouteInputParams + ) + response := &responses.RouterSentTransactions{ + SendDetails: &responses.SendDetails{ + Uuid: sendInputParams.Uuid, + }, + } + + defer func() { + clearLocalData := true + if routeInputParams.SendType == sendtype.Swap { + // in case of swap don't clear local data if an approval is placed, but swap tx is not sent yet + if m.transactionManager.ApprovalRequiredForPath(pathprocessor.ProcessorSwapParaswapName) && + m.transactionManager.ApprovalPlacedForPath(pathprocessor.ProcessorSwapParaswapName) && + !m.transactionManager.TxPlacedForPath(pathprocessor.ProcessorSwapParaswapName) { + clearLocalData = false + } + } + + if clearLocalData { + m.transactionManager.ClearLocalRouterTransactionsData() + } + + if err != nil { + err = statusErrors.CreateErrorResponseFromError(err) + response.SendDetails.ErrorResponse = err.(*statusErrors.ErrorResponse) + } + signal.SendWalletEvent(signal.RouterTransactionsSent, response) + }() + + _, routeInputParams = m.router.GetBestRouteAndAssociatedInputParams() + 
if routeInputParams.Uuid != sendInputParams.Uuid { + err = ErrCannotResolveRouteId + return + } + + updateFields(response.SendDetails, routeInputParams) + + err = m.transactionManager.ValidateAndAddSignaturesToRouterTransactions(sendInputParams.Signatures) + if err != nil { + return + } + + ////////////////////////////////////////////////////////////////////////////// + // prepare multitx + var mtType transfer.MultiTransactionType = transfer.MultiTransactionSend + if routeInputParams.SendType == sendtype.Bridge { + mtType = transfer.MultiTransactionBridge + } else if routeInputParams.SendType == sendtype.Swap { + mtType = transfer.MultiTransactionSwap + } + + multiTx := transfer.NewMultiTransaction( + /* Timestamp: */ uint64(time.Now().Unix()), + /* FromNetworkID: */ 0, + /* ToNetworkID: */ 0, + /* FromTxHash: */ common.Hash{}, + /* ToTxHash: */ common.Hash{}, + /* FromAddress: */ routeInputParams.AddrFrom, + /* ToAddress: */ routeInputParams.AddrTo, + /* FromAsset: */ routeInputParams.TokenID, + /* ToAsset: */ routeInputParams.ToTokenID, + /* FromAmount: */ routeInputParams.AmountIn, + /* ToAmount: */ routeInputParams.AmountOut, + /* Type: */ mtType, + /* CrossTxID: */ "", + ) + + _, err = m.transactionManager.InsertMultiTransaction(multiTx) + if err != nil { + return + } + ////////////////////////////////////////////////////////////////////////////// + + response.SentTransactions, err = m.transactionManager.SendRouterTransactions(ctx, multiTx) + + var ( + chainIDs []uint64 + addresses []common.Address + ) + for _, tx := range response.SentTransactions { + chainIDs = append(chainIDs, tx.FromChain) + addresses = append(addresses, common.Address(tx.FromAddress)) + go func(chainId uint64, txHash common.Hash) { + defer status_common.LogOnPanic() + err = m.transactionManager.WatchTransaction(context.Background(), chainId, txHash) + if err != nil { + return + } + }(tx.FromChain, common.Hash(tx.Hash)) + } + err = m.transferController.CheckRecentHistory(chainIDs, addresses) + }() +} + +func updateFields(sd *responses.SendDetails, inputParams requests.RouteInputParams) { + sd.SendType = int(inputParams.SendType) + sd.FromAddress = types.Address(inputParams.AddrFrom) + sd.ToAddress = types.Address(inputParams.AddrTo) + sd.FromToken = inputParams.TokenID + sd.ToToken = inputParams.ToTokenID + if inputParams.AmountIn != nil { + sd.FromAmount = inputParams.AmountIn.String() + } + if inputParams.AmountOut != nil { + sd.ToAmount = inputParams.AmountOut.String() + } + sd.OwnerTokenBeingSent = inputParams.TokenIDIsOwnerToken + sd.Username = inputParams.Username + sd.PublicKey = inputParams.PublicKey + if inputParams.PackID != nil { + sd.PackID = inputParams.PackID.String() + } +} diff --git a/services/wallet/router/router.go b/services/wallet/router/router.go index 799d2f50311..1dd4993fb71 100644 --- a/services/wallet/router/router.go +++ b/services/wallet/router/router.go @@ -8,10 +8,12 @@ import ( "strings" "sync" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/errors" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/services/ens" @@ -597,7 +599,13 @@ func (r *Router) resolveCandidates(ctx context.Context, input *requests.RouteInp } appendProcessorErrorFn := func(processorName string, sendType sendtype.SendType, fromChainID uint64, toChainID uint64, amount *big.Int, 
err error) { - log.Error("router.resolveCandidates error", "processor", processorName, "sendType", sendType, "fromChainId: ", fromChainID, "toChainId", toChainID, "amount", amount, "err", err) + logutils.ZapLogger().Error("router.resolveCandidates error", + zap.String("processor", processorName), + zap.Int("sendType", int(sendType)), + zap.Uint64("fromChainId", fromChainID), + zap.Uint64("toChainId", toChainID), + zap.Stringer("amount", amount), + zap.Error(err)) mu.Lock() defer mu.Unlock() processorErrors = append(processorErrors, &ProcessorError{ diff --git a/services/wallet/router/router_updates.go b/services/wallet/router/router_updates.go index f35b03effd0..91cb1a68741 100644 --- a/services/wallet/router/router_updates.go +++ b/services/wallet/router/router_updates.go @@ -4,8 +4,10 @@ import ( "context" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc/chain" walletCommon "github.com/status-im/status-go/services/wallet/common" ) @@ -33,7 +35,7 @@ func (r *Router) subscribeForUdates(chainID uint64) error { ethClient, err := r.rpcClient.EthClient(chainID) if err != nil { - log.Error("Failed to get eth client", "error", err) + logutils.ZapLogger().Error("Failed to get eth client", zap.Error(err)) return err } @@ -75,19 +77,19 @@ func (r *Router) subscribeForUdates(chainID uint64) error { var blockNumber uint64 blockNumber, err := ethClient.BlockNumber(ctx) if err != nil { - log.Error("Failed to get block number", "error", err) + logutils.ZapLogger().Error("Failed to get block number", zap.Error(err)) continue } val, ok := r.clientsForUpdatesPerChains.Load(chainID) if !ok { - log.Error("Failed to get fetchingLastBlock", "chain", chainID) + logutils.ZapLogger().Error("Failed to get fetchingLastBlock", zap.Uint64("chain", chainID)) continue } flbLoaded, ok := val.(fetchingLastBlock) if !ok { - log.Error("Failed to get fetchingLastBlock", "chain", chainID) + logutils.ZapLogger().Error("Failed to get fetchingLastBlock", zap.Uint64("chain", chainID)) continue } @@ -97,7 +99,7 @@ func (r *Router) subscribeForUdates(chainID uint64) error { fees, err := r.feesManager.SuggestedFees(ctx, chainID) if err != nil { - log.Error("Failed to get suggested fees", "error", err) + logutils.ZapLogger().Error("Failed to get suggested fees", zap.Error(err)) continue } @@ -110,7 +112,7 @@ func (r *Router) subscribeForUdates(chainID uint64) error { for _, path := range r.activeRoutes.Best { err = r.cacluateFees(ctx, path, fees, false, 0) if err != nil { - log.Error("Failed to calculate fees", "error", err) + logutils.ZapLogger().Error("Failed to calculate fees", zap.Error(err)) continue } } @@ -152,7 +154,7 @@ func (r *Router) unsubscribeFeesUpdateAccrossAllChains() { r.clientsForUpdatesPerChains.Range(func(key, value interface{}) bool { flb, ok := value.(fetchingLastBlock) if !ok { - log.Error("Failed to get fetchingLastBlock", "chain", key) + logutils.ZapLogger().Error("Failed to get fetchingLastBlock", zap.Any("chain", key)) return false } diff --git a/services/wallet/service.go b/services/wallet/service.go index 9ddeb597bb4..fb5e37a0f78 100644 --- a/services/wallet/service.go +++ b/services/wallet/service.go @@ -9,11 +9,11 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" gethrpc "github.com/ethereum/go-ethereum/rpc" 
"github.com/status-im/status-go/account" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/params" protocolCommon "github.com/status-im/status-go/protocol/common" @@ -30,6 +30,9 @@ import ( "github.com/status-im/status-go/services/wallet/history" "github.com/status-im/status-go/services/wallet/market" "github.com/status-im/status-go/services/wallet/onramp" + "github.com/status-im/status-go/services/wallet/routeexecution" + "github.com/status-im/status-go/services/wallet/router" + "github.com/status-im/status-go/services/wallet/router/pathprocessor" "github.com/status-im/status-go/services/wallet/thirdparty" "github.com/status-im/status-go/services/wallet/thirdparty/alchemy" "github.com/status-im/status-go/services/wallet/thirdparty/coingecko" @@ -188,6 +191,15 @@ func NewService( featureFlags.EnableCelerBridge = true } + router := router.NewRouter(rpcClient, transactor, tokenManager, marketManager, collectibles, + collectiblesManager, ens, stickers) + pathProcessors := buildPathProcessors(rpcClient, transactor, tokenManager, ens, stickers, featureFlags) + for _, processor := range pathProcessors { + router.AddPathProcessor(processor) + } + + routeExecutionManager := routeexecution.NewManager(router, transactionManager, transferController) + return &Service{ db: db, accountsDB: accountsDB, @@ -217,9 +229,51 @@ func NewService( keycardPairings: NewKeycardPairings(), config: config, featureFlags: featureFlags, + router: router, + routeExecutionManager: routeExecutionManager, } } +func buildPathProcessors( + rpcClient *rpc.Client, + transactor *transactions.Transactor, + tokenManager *token.Manager, + ens *ens.Service, + stickers *stickers.Service, + featureFlags *protocolCommon.FeatureFlags, +) []pathprocessor.PathProcessor { + ret := make([]pathprocessor.PathProcessor, 0) + + transfer := pathprocessor.NewTransferProcessor(rpcClient, transactor) + ret = append(ret, transfer) + + erc721Transfer := pathprocessor.NewERC721Processor(rpcClient, transactor) + ret = append(ret, erc721Transfer) + + erc1155Transfer := pathprocessor.NewERC1155Processor(rpcClient, transactor) + ret = append(ret, erc1155Transfer) + + hop := pathprocessor.NewHopBridgeProcessor(rpcClient, transactor, tokenManager, rpcClient.NetworkManager) + ret = append(ret, hop) + + if featureFlags.EnableCelerBridge { + // TODO: Celar Bridge is out of scope for 2.30, check it thoroughly once we decide to include it again + cbridge := pathprocessor.NewCelerBridgeProcessor(rpcClient, transactor, tokenManager) + ret = append(ret, cbridge) + } + + paraswap := pathprocessor.NewSwapParaswapProcessor(rpcClient, transactor, tokenManager) + ret = append(ret, paraswap) + + ensRegister := pathprocessor.NewENSRegisterProcessor(rpcClient, transactor, ens) + ret = append(ret, ensRegister) + + ensRelease := pathprocessor.NewENSReleaseProcessor(rpcClient, transactor, ens) + ret = append(ret, ensRelease) + + return ret +} + // Service is a wallet service. type Service struct { db *sql.DB @@ -251,6 +305,8 @@ type Service struct { keycardPairings *KeycardPairings config *params.NodeConfig featureFlags *protocolCommon.FeatureFlags + router *router.Router + routeExecutionManager *routeexecution.Manager } // Start signals transmitter. @@ -271,7 +327,8 @@ func (s *Service) SetWalletCommunityInfoProvider(provider thirdparty.CommunityIn // Stop reactor and close db. 
 func (s *Service) Stop() error {
-	log.Info("wallet will be stopped")
+	logutils.ZapLogger().Info("wallet will be stopped")
+	s.router.Stop()
 	s.signals.Stop()
 	s.transferController.Stop()
 	s.currency.Stop()
@@ -281,7 +338,7 @@ func (s *Service) Stop() error {
 	s.collectibles.Stop()
 	s.tokenManager.Stop()
 	s.started = false
-	log.Info("wallet stopped")
+	logutils.ZapLogger().Info("wallet stopped")
 	return nil
 }
diff --git a/services/wallet/thirdparty/alchemy/client.go b/services/wallet/thirdparty/alchemy/client.go
index c91b48f3160..4fe3c6b0711 100644
--- a/services/wallet/thirdparty/alchemy/client.go
+++ b/services/wallet/thirdparty/alchemy/client.go
@@ -11,10 +11,11 @@ import (
 	"time"

 	"github.com/cenkalti/backoff/v4"
+	"go.uber.org/zap"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"

+	"github.com/status-im/status-go/logutils"
 	walletCommon "github.com/status-im/status-go/services/wallet/common"
 	"github.com/status-im/status-go/services/wallet/connection"
 	"github.com/status-im/status-go/services/wallet/thirdparty"
@@ -82,7 +83,7 @@ type Client struct {
 func NewClient(apiKeys map[uint64]string) *Client {
 	for _, chainID := range walletCommon.AllChainIDs() {
 		if apiKeys[uint64(chainID)] == "" {
-			log.Warn("Alchemy API key not available for", "chainID", chainID)
+			logutils.ZapLogger().Warn("Alchemy API key not available", zap.Stringer("chainID", chainID))
 		}
 	}

@@ -144,7 +145,11 @@ func (o *Client) doWithRetries(req *http.Request) (*http.Response, error) {
 		err = fmt.Errorf("unsuccessful request: %d %s", resp.StatusCode, http.StatusText(resp.StatusCode))
 		if resp.StatusCode == http.StatusTooManyRequests {
-			log.Error("doWithRetries failed with http.StatusTooManyRequests", "provider", o.ID(), "elapsed time", b.GetElapsedTime(), "next backoff", b.NextBackOff())
+			logutils.ZapLogger().Error("doWithRetries failed with http.StatusTooManyRequests",
+				zap.String("provider", o.ID()),
+				zap.Duration("elapsed time", b.GetElapsedTime()),
+				zap.Duration("next backoff", b.NextBackOff()),
+			)
 			return nil, err
 		}
 		return nil, backoff.Permanent(err)
diff --git a/services/wallet/thirdparty/opensea/client_v2.go b/services/wallet/thirdparty/opensea/client_v2.go
index 4d232fdf527..b53ca75d7c0 100644
--- a/services/wallet/thirdparty/opensea/client_v2.go
+++ b/services/wallet/thirdparty/opensea/client_v2.go
@@ -9,8 +9,8 @@ import (
 	"strings"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"

+	"github.com/status-im/status-go/logutils"
 	walletCommon "github.com/status-im/status-go/services/wallet/common"
 	"github.com/status-im/status-go/services/wallet/connection"
 	"github.com/status-im/status-go/services/wallet/thirdparty"
@@ -61,7 +61,7 @@ type ClientV2 struct {

 // new opensea v2 client.
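+// Construction does not fail when no API key is configured; as the body below
+// shows, a warning is logged instead.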
func NewClientV2(apiKey string, httpClient *HTTPClient) *ClientV2 { if apiKey == "" { - log.Warn("OpenseaV2 API key not available") + logutils.ZapLogger().Warn("OpenseaV2 API key not available") } return &ClientV2{ diff --git a/services/wallet/thirdparty/opensea/http_client.go b/services/wallet/thirdparty/opensea/http_client.go index c038c720610..60089b7ec8e 100644 --- a/services/wallet/thirdparty/opensea/http_client.go +++ b/services/wallet/thirdparty/opensea/http_client.go @@ -8,7 +8,9 @@ import ( "sync" "time" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + + "github.com/status-im/status-go/logutils" ) const requestTimeout = 5 * time.Second @@ -57,7 +59,7 @@ func (o *HTTPClient) doGetRequest(ctx context.Context, url string, apiKey string } defer func() { if err := resp.Body.Close(); err != nil { - log.Error("failed to close opensea request body", "err", err) + logutils.ZapLogger().Error("failed to close opensea request body", zap.Error(err)) } }() diff --git a/services/wallet/thirdparty/rarible/client.go b/services/wallet/thirdparty/rarible/client.go index bf32df48151..a41fe5a9b20 100644 --- a/services/wallet/thirdparty/rarible/client.go +++ b/services/wallet/thirdparty/rarible/client.go @@ -12,10 +12,11 @@ import ( "time" "github.com/cenkalti/backoff/v4" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" walletCommon "github.com/status-im/status-go/services/wallet/common" "github.com/status-im/status-go/services/wallet/connection" "github.com/status-im/status-go/services/wallet/thirdparty" @@ -91,11 +92,11 @@ type Client struct { func NewClient(mainnetAPIKey string, testnetAPIKey string) *Client { if mainnetAPIKey == "" { - log.Warn("Rarible API key not available for Mainnet") + logutils.ZapLogger().Warn("Rarible API key not available for Mainnet") } if testnetAPIKey == "" { - log.Warn("Rarible API key not available for Testnet") + logutils.ZapLogger().Warn("Rarible API key not available for Testnet") } return &Client{ @@ -168,7 +169,11 @@ func (o *Client) doWithRetries(req *http.Request, apiKey string) (*http.Response err = fmt.Errorf("unsuccessful request: %d %s", resp.StatusCode, http.StatusText(resp.StatusCode)) if resp.StatusCode == http.StatusTooManyRequests { - log.Error("doWithRetries failed with http.StatusTooManyRequests", "provider", o.ID(), "elapsed time", b.GetElapsedTime(), "next backoff", b.NextBackOff()) + logutils.ZapLogger().Error("doWithRetries failed with http.StatusTooManyRequests", + zap.String("provider", o.ID()), + zap.Duration("elapsed time", b.GetElapsedTime()), + zap.Duration("next backoff", b.NextBackOff()), + ) return nil, err } return nil, backoff.Permanent(err) diff --git a/services/wallet/token/balancefetcher/balance_fetcher.go b/services/wallet/token/balancefetcher/balance_fetcher.go index 4c739579342..f040d2d5161 100644 --- a/services/wallet/token/balancefetcher/balance_fetcher.go +++ b/services/wallet/token/balancefetcher/balance_fetcher.go @@ -7,13 +7,15 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/contracts" "github.com/status-im/status-go/contracts/ethscan" "github.com/status-im/status-go/contracts/ierc20" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc/chain" 
"github.com/status-im/status-go/services/wallet/async" ) @@ -67,7 +69,7 @@ func (bf *DefaultBalanceFetcher) fetchBalancesForChain(parent context.Context, c ethScanContract, availableAtBlock, err := bf.contractMaker.NewEthScan(client.NetworkID()) if err != nil { - log.Error("error scanning contract", "err", err) + logutils.ZapLogger().Error("error scanning contract", zap.Error(err)) return nil, err } @@ -139,7 +141,7 @@ func (bf *DefaultBalanceFetcher) FetchChainBalances(parent context.Context, acco BlockNumber: atBlock, }, accounts) if err != nil { - log.Error("can't fetch chain balance 5", "err", err) + logutils.ZapLogger().Error("can't fetch chain balance 5", zap.Error(err)) return nil, err } for idx, account := range accounts { @@ -163,12 +165,17 @@ func (bf *DefaultBalanceFetcher) FetchTokenBalancesWithScanContract(ctx context. BlockNumber: atBlock, }, account, chunk) if err != nil { - log.Error("can't fetch erc20 token balance 6", "account", account, "error", err) + logutils.ZapLogger().Error("can't fetch erc20 token balance 6", zap.Stringer("account", account), zap.Error(err)) return nil, err } if len(res) != len(chunk) { - log.Error("can't fetch erc20 token balance 7", "account", account, "error", "response not complete", "expected", len(chunk), "got", len(res)) + logutils.ZapLogger().Error("can't fetch erc20 token balance 7", + zap.Stringer("account", account), + zap.Error(errors.New("response not complete")), + zap.Int("expected", len(chunk)), + zap.Int("got", len(res)), + ) return nil, errors.New("response not complete") } @@ -194,7 +201,11 @@ func (bf *DefaultBalanceFetcher) fetchTokenBalancesWithTokenContracts(ctx contex balance, err := bf.GetTokenBalanceAt(ctx, client, account, token, atBlock) if err != nil { if err != bind.ErrNoCode { - log.Error("can't fetch erc20 token balance 8", "account", account, "token", token, "error", "on fetching token balance") + logutils.ZapLogger().Error("can't fetch erc20 token balance 8", + zap.Stringer("account", account), + zap.Stringer("token", token), + zap.Error(errors.New("on fetching token balance")), + ) return nil, err } } diff --git a/services/wallet/token/token.go b/services/wallet/token/token.go index 08d2b54b9ab..fa47b7b565f 100644 --- a/services/wallet/token/token.go +++ b/services/wallet/token/token.go @@ -14,15 +14,17 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/contracts" "github.com/status-im/status-go/contracts/community-tokens/assets" eth_node_types "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/params" "github.com/status-im/status-go/protocol/communities/token" @@ -358,7 +360,10 @@ func (tm *Manager) FindOrCreateTokenByAddress(ctx context.Context, chainID uint6 } func (tm *Manager) MarkAsPreviouslyOwnedToken(token *Token, owner common.Address) (bool, error) { - log.Info("Marking token as previously owned", "token", token, "owner", owner) + logutils.ZapLogger().Info("Marking token as previously owned", + zap.Any("token", token), + zap.Stringer("owner", owner), + ) if token == nil { return false, errors.New("token is nil") } @@ -376,7 +381,10 @@ func (tm *Manager) 
MarkAsPreviouslyOwnedToken(token *Token, owner common.Address } else { for _, t := range tokens[owner] { if t.Address == token.Address && t.ChainID == token.ChainID && t.Symbol == token.Symbol { - log.Info("Token already marked as previously owned", "token", token, "owner", owner) + logutils.ZapLogger().Info("Token already marked as previously owned", + zap.Any("token", token), + zap.Stringer("owner", owner), + ) return false, nil } } @@ -426,7 +434,7 @@ func (tm *Manager) discoverTokenCommunityID(ctx context.Context, token *Token, a update, err := tm.db.Prepare("UPDATE tokens SET community_id=? WHERE network_id=? AND address=?") if err != nil { - log.Error("Cannot prepare token update query", err) + logutils.ZapLogger().Error("Cannot prepare token update query", zap.Error(err)) return } @@ -434,7 +442,7 @@ func (tm *Manager) discoverTokenCommunityID(ctx context.Context, token *Token, a // Update token community ID to prevent further checks _, err := update.Exec("", token.ChainID, token.Address) if err != nil { - log.Error("Cannot update community id", err) + logutils.ZapLogger().Error("Cannot update community id", zap.Error(err)) } return } @@ -452,7 +460,7 @@ func (tm *Manager) discoverTokenCommunityID(ctx context.Context, token *Token, a _, err = update.Exec(communityID, token.ChainID, token.Address) if err != nil { - log.Error("Cannot update community id", err) + logutils.ZapLogger().Error("Cannot update community id", zap.Error(err)) } } @@ -488,7 +496,7 @@ func (tm *Manager) getNativeTokens() ([]*Token, error) { func (tm *Manager) GetAllTokens() ([]*Token, error) { allTokens, err := tm.GetCustoms(true) if err != nil { - log.Error("can't fetch custom tokens", "error", err) + logutils.ZapLogger().Error("can't fetch custom tokens", zap.Error(err)) } allTokens = append(tm.getTokens(), allTokens...) 
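
Every hunk in this file, and in the transfer and router files below, applies essentially the same mechanical rewrite: each variadic key/value pair passed to the github.com/ethereum/go-ethereum/log logger becomes one typed zap field, with errors routed through zap.Error and fmt.Stringer values such as common.Address and *big.Int routed through zap.Stringer. A minimal sketch of the mapping, assuming a locally constructed zap logger stands in for status-go's logutils.ZapLogger() helper:

	package main

	import (
		"errors"
		"math/big"

		"go.uber.org/zap"
	)

	func main() {
		// Assumption: a development logger stands in for logutils.ZapLogger(),
		// which returns the process-wide configured *zap.Logger in status-go.
		logger, _ := zap.NewDevelopment()
		defer func() { _ = logger.Sync() }()

		chainID := uint64(1)
		balance := big.NewInt(42)
		err := errors.New("response not complete")

		// Before (go-ethereum/log, positional pairs):
		//   log.Error("can't fetch erc20 token balance", "chain", chainID, "balance", balance, "error", err)
		// After (zap, one typed field per pair):
		logger.Error("can't fetch erc20 token balance",
			zap.Uint64("chain", chainID),
			zap.Stringer("balance", balance), // *big.Int implements fmt.Stringer
			zap.Error(err),                   // canonical "error" key
		)
	}

zap.Stringer stores the value and calls String() only if the entry is actually encoded, which keeps call sites at disabled log levels cheap.
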
@@ -790,7 +798,7 @@ func (tm *Manager) onAccountsChange(changedAddresses []common.Address, eventType for _, account := range changedAddresses { err := tm.removeTokenBalances(account) if err != nil { - log.Error("token.Manager: can't remove token balances", "error", err) + logutils.ZapLogger().Error("token.Manager: can't remove token balances", zap.Error(err)) } } } diff --git a/services/wallet/transfer/block_dao.go b/services/wallet/transfer/block_dao.go index e1d3736295c..3c21746de21 100644 --- a/services/wallet/transfer/block_dao.go +++ b/services/wallet/transfer/block_dao.go @@ -4,8 +4,10 @@ import ( "database/sql" "math/big" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/bigint" ) @@ -69,7 +71,11 @@ func (b *BlockDAO) mergeRanges(chainID uint64, account common.Address) (err erro return err } - log.Info("merge old ranges", "account", account, "network", chainID, "ranges", len(ranges)) + logutils.ZapLogger().Info("merge old ranges", + zap.Stringer("account", account), + zap.Uint64("network", chainID), + zap.Int("ranges", len(ranges)), + ) if len(ranges) <= 1 { return nil @@ -108,7 +114,15 @@ func (b *BlockDAO) mergeRanges(chainID uint64, account common.Address) (err erro } func (b *BlockDAO) insertRange(chainID uint64, account common.Address, from, to, balance *big.Int, nonce uint64) error { - log.Debug("insert blocks range", "account", account, "network id", chainID, "from", from, "to", to, "balance", balance, "nonce", nonce) + logutils.ZapLogger().Debug( + "insert blocks range", + zap.Stringer("account", account), + zap.Uint64("network id", chainID), + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Stringer("balance", balance), + zap.Uint64("nonce", nonce), + ) insert, err := b.db.Prepare("INSERT INTO blocks_ranges (network_id, address, blk_from, blk_to, balance, nonce) VALUES (?, ?, ?, ?, ?, ?)") if err != nil { return err @@ -294,7 +308,7 @@ func (b *BlockDAO) GetLastKnownBlockByAddresses(chainID uint64, addresses []comm for _, address := range addresses { block, err := b.GetLastKnownBlockByAddress(chainID, address) if err != nil { - log.Info("Can't get last block", "error", err) + logutils.ZapLogger().Info("Can't get last block", zap.Error(err)) return nil, nil, err } @@ -333,7 +347,10 @@ func getNewRanges(ranges []*BlocksRange) ([]*BlocksRange, []*BlocksRange) { to: prevTo, }) } - log.Info("blocks ranges gap detected", "from", prevTo, "to", blocksRange.from) + logutils.ZapLogger().Info("blocks ranges gap detected", + zap.Stringer("from", prevTo), + zap.Stringer("to", blocksRange.from), + ) hasMergedRanges = false prevFrom = blocksRange.from @@ -353,14 +370,19 @@ func getNewRanges(ranges []*BlocksRange) ([]*BlocksRange, []*BlocksRange) { } func deleteRange(chainID uint64, creator statementCreator, account common.Address, from *big.Int, to *big.Int) error { - log.Info("delete blocks range", "account", account, "network", chainID, "from", from, "to", to) + logutils.ZapLogger().Info("delete blocks range", + zap.Stringer("account", account), + zap.Uint64("network", chainID), + zap.Stringer("from", from), + zap.Stringer("to", to), + ) delete, err := creator.Prepare(`DELETE FROM blocks_ranges WHERE address = ? AND network_id = ? AND blk_from = ? 
AND blk_to = ?`) if err != nil { - log.Info("some error", "error", err) + logutils.ZapLogger().Info("some error", zap.Error(err)) return err } @@ -379,7 +401,12 @@ func deleteAllRanges(creator statementCreator, account common.Address) error { } func insertRange(chainID uint64, creator statementCreator, account common.Address, from *big.Int, to *big.Int) error { - log.Info("insert blocks range", "account", account, "network", chainID, "from", from, "to", to) + logutils.ZapLogger().Info("insert blocks range", + zap.Stringer("account", account), + zap.Uint64("network", chainID), + zap.Stringer("from", from), + zap.Stringer("to", to), + ) insert, err := creator.Prepare("INSERT INTO blocks_ranges (network_id, address, blk_from, blk_to) VALUES (?, ?, ?, ?)") if err != nil { return err diff --git a/services/wallet/transfer/block_ranges_sequential_dao.go b/services/wallet/transfer/block_ranges_sequential_dao.go index 615051b3499..c9e87d27555 100644 --- a/services/wallet/transfer/block_ranges_sequential_dao.go +++ b/services/wallet/transfer/block_ranges_sequential_dao.go @@ -4,8 +4,10 @@ import ( "database/sql" "math/big" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/bigint" ) @@ -134,10 +136,10 @@ func (b *BlockRangeSequentialDAO) getBlockRanges(chainID uint64, addresses []com } func (b *BlockRangeSequentialDAO) deleteRange(account common.Address) error { - log.Debug("delete blocks range", "account", account) + logutils.ZapLogger().Debug("delete blocks range", zap.Stringer("account", account)) delete, err := b.db.Prepare(`DELETE FROM blocks_ranges_sequential WHERE address = ?`) if err != nil { - log.Error("Failed to prepare deletion of sequential block range", "error", err) + logutils.ZapLogger().Error("Failed to prepare deletion of sequential block range", zap.Error(err)) return err } @@ -154,14 +156,16 @@ func (b *BlockRangeSequentialDAO) upsertRange(chainID uint64, account common.Add ethBlockRange := prepareUpdatedBlockRange(ethTokensBlockRange.eth, newBlockRange.eth) tokensBlockRange := prepareUpdatedBlockRange(ethTokensBlockRange.tokens, newBlockRange.tokens) - log.Debug("upsert eth and tokens blocks range", - "account", account, "chainID", chainID, - "eth.start", ethBlockRange.Start, - "eth.first", ethBlockRange.FirstKnown, - "eth.last", ethBlockRange.LastKnown, - "tokens.first", tokensBlockRange.FirstKnown, - "tokens.last", tokensBlockRange.LastKnown, - "hash", newBlockRange.balanceCheckHash) + logutils.ZapLogger().Debug("upsert eth and tokens blocks range", + zap.Stringer("account", account), + zap.Uint64("chainID", chainID), + zap.Stringer("eth.start", ethBlockRange.Start), + zap.Stringer("eth.first", ethBlockRange.FirstKnown), + zap.Stringer("eth.last", ethBlockRange.LastKnown), + zap.Stringer("tokens.first", tokensBlockRange.FirstKnown), + zap.Stringer("tokens.last", tokensBlockRange.LastKnown), + zap.String("hash", newBlockRange.balanceCheckHash), + ) var query *sql.Stmt @@ -200,11 +204,14 @@ func (b *BlockRangeSequentialDAO) upsertEthRange(chainID uint64, account common. 
blockRange := prepareUpdatedBlockRange(ethTokensBlockRange.eth, newBlockRange) - log.Debug("upsert eth blocks range", "account", account, "chainID", chainID, - "start", blockRange.Start, - "first", blockRange.FirstKnown, - "last", blockRange.LastKnown, - "old hash", ethTokensBlockRange.balanceCheckHash) + logutils.ZapLogger().Debug("upsert eth blocks range", + zap.Stringer("account", account), + zap.Uint64("chainID", chainID), + zap.Stringer("start", blockRange.Start), + zap.Stringer("first", blockRange.FirstKnown), + zap.Stringer("last", blockRange.LastKnown), + zap.String("old hash", ethTokensBlockRange.balanceCheckHash), + ) var query *sql.Stmt @@ -237,9 +244,10 @@ func (b *BlockRangeSequentialDAO) updateTokenRange(chainID uint64, account commo blockRange := prepareUpdatedBlockRange(ethTokensBlockRange.tokens, newBlockRange) - log.Debug("update tokens blocks range", - "first", blockRange.FirstKnown, - "last", blockRange.LastKnown) + logutils.ZapLogger().Debug("update tokens blocks range", + zap.Stringer("first", blockRange.FirstKnown), + zap.Stringer("last", blockRange.LastKnown), + ) update, err := b.db.Prepare(`UPDATE blocks_ranges_sequential SET token_blk_start = ?, token_blk_first = ?, token_blk_last = ? WHERE network_id = ? AND address = ?`) if err != nil { diff --git a/services/wallet/transfer/commands.go b/services/wallet/transfer/commands.go index 164996f754d..5e880de03a8 100644 --- a/services/wallet/transfer/commands.go +++ b/services/wallet/transfer/commands.go @@ -6,13 +6,14 @@ import ( "math/big" "time" + "go.uber.org/zap" "golang.org/x/exp/maps" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc/chain" "github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/balance" @@ -80,8 +81,13 @@ func (c *ethHistoricalCommand) Command() async.Command { } func (c *ethHistoricalCommand) Run(ctx context.Context) (err error) { - log.Debug("eth historical downloader start", "chainID", c.chainClient.NetworkID(), "address", c.address, - "from", c.from.Number, "to", c.to, "noLimit", c.noLimit) + logutils.ZapLogger().Debug("eth historical downloader start", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Stringer("from", c.from.Number), + zap.Stringer("to", c.to), + zap.Bool("noLimit", c.noLimit), + ) start := time.Now() if c.from.Number != nil && c.from.Balance != nil { @@ -95,8 +101,13 @@ func (c *ethHistoricalCommand) Run(ctx context.Context) (err error) { if err != nil { c.error = err - log.Error("failed to find blocks with transfers", "error", err, "chainID", c.chainClient.NetworkID(), - "address", c.address, "from", c.from.Number, "to", c.to) + logutils.ZapLogger().Error("failed to find blocks with transfers", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Stringer("from", c.from.Number), + zap.Stringer("to", c.to), + zap.Error(err), + ) return nil } @@ -104,8 +115,14 @@ func (c *ethHistoricalCommand) Run(ctx context.Context) (err error) { c.resultingFrom = from c.startBlock = startBlock - log.Debug("eth historical downloader finished successfully", "chain", c.chainClient.NetworkID(), - "address", c.address, "from", from, "to", c.to, "total blocks", len(headers), "time", time.Since(start)) + logutils.ZapLogger().Debug("eth historical downloader finished 
successfully", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Stringer("from", from), + zap.Stringer("to", c.to), + zap.Int("totalBlocks", len(headers)), + zap.Duration("time", time.Since(start)), + ) return nil } @@ -146,8 +163,11 @@ func getErc20BatchSize(chainID uint64) *big.Int { } func (c *erc20HistoricalCommand) Run(ctx context.Context) (err error) { - log.Debug("wallet historical downloader for erc20 transfers start", "chainID", c.chainClient.NetworkID(), - "from", c.from, "to", c.to) + logutils.ZapLogger().Debug("wallet historical downloader for erc20 transfers start", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("from", c.from), + zap.Stringer("to", c.to), + ) start := time.Now() if c.iterator == nil { @@ -155,20 +175,28 @@ func (c *erc20HistoricalCommand) Run(ctx context.Context) (err error) { c.chainClient, c.erc20, getErc20BatchSize(c.chainClient.NetworkID()), c.to, c.from) if err != nil { - log.Error("failed to setup historical downloader for erc20") + logutils.ZapLogger().Error("failed to setup historical downloader for erc20") return err } } for !c.iterator.Finished() { headers, _, _, err := c.iterator.Next(ctx) if err != nil { - log.Error("failed to get next batch", "error", err, "chainID", c.chainClient.NetworkID()) // TODO: stop inifinite command in case of an error that we can't fix like missing trie node + logutils.ZapLogger().Error("failed to get next batch", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Error(err), + ) // TODO: stop inifinite command in case of an error that we can't fix like missing trie node return err } c.foundHeaders = append(c.foundHeaders, headers...) } - log.Debug("wallet historical downloader for erc20 transfers finished", "chainID", c.chainClient.NetworkID(), - "from", c.from, "to", c.to, "time", time.Since(start), "headers", len(c.foundHeaders)) + logutils.ZapLogger().Debug("wallet historical downloader for erc20 transfers finished", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("from", c.from), + zap.Stringer("to", c.to), + zap.Duration("time", time.Since(start)), + zap.Int("headers", len(c.foundHeaders)), + ) return nil } @@ -211,7 +239,11 @@ func (c *transfersCommand) Run(ctx context.Context) (err error) { // Take blocks from cache if available and disrespect the limit // If no blocks are available in cache, take blocks from DB respecting the limit // If no limit is set, take all blocks from DB - log.Debug("start transfersCommand", "chain", c.chainClient.NetworkID(), "address", c.address, "blockNums", c.blockNums) + logutils.ZapLogger().Debug("start transfersCommand", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Stringers("blockNums", c.blockNums), + ) startTs := time.Now() for { @@ -221,11 +253,15 @@ func (c *transfersCommand) Run(ctx context.Context) (err error) { } for _, blockNum := range blocks { - log.Debug("transfersCommand block start", "chain", c.chainClient.NetworkID(), "address", c.address, "block", blockNum) + logutils.ZapLogger().Debug("transfersCommand block start", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Stringer("blockNum", blockNum), + ) allTransfers, err := c.eth.GetTransfersByNumber(ctx, blockNum) if err != nil { - log.Error("getTransfersByBlocks error", "error", err) + logutils.ZapLogger().Error("getTransfersByBlocks error", zap.Error(err)) return err } @@ -235,23 +271,27 @@ func (c *transfersCommand) Run(ctx 
context.Context) (err error) { // First, try to match to any pre-existing pending/multi-transaction err := c.saveAndConfirmPending(allTransfers, blockNum) if err != nil { - log.Error("saveAndConfirmPending error", "error", err) + logutils.ZapLogger().Error("saveAndConfirmPending error", zap.Error(err)) return err } // Check if multi transaction needs to be created err = c.processMultiTransactions(ctx, allTransfers) if err != nil { - log.Error("processMultiTransactions error", "error", err) + logutils.ZapLogger().Error("processMultiTransactions error", zap.Error(err)) return err } } else { // If no transfers found, that is suspecting, because downloader returned this block as containing transfers - log.Error("no transfers found in block", "chain", c.chainClient.NetworkID(), "address", c.address, "block", blockNum) + logutils.ZapLogger().Error("no transfers found in block", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Stringer("block", blockNum), + ) err = markBlocksAsLoaded(c.chainClient.NetworkID(), c.db.client, c.address, []*big.Int{blockNum}) if err != nil { - log.Error("Mark blocks loaded error", "error", err) + logutils.ZapLogger().Error("Mark blocks loaded error", zap.Error(err)) return err } } @@ -264,20 +304,34 @@ func (c *transfersCommand) Run(ctx context.Context) (err error) { c.notifyOfLatestTransfers(allTransfers, w_common.Erc721Transfer) c.notifyOfLatestTransfers(allTransfers, w_common.Erc1155Transfer) - log.Debug("transfersCommand block end", "chain", c.chainClient.NetworkID(), "address", c.address, - "block", blockNum, "tranfers.len", len(allTransfers), "fetchedTransfers.len", len(c.fetchedTransfers)) + logutils.ZapLogger().Debug("transfersCommand block end", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Stringer("blockNum", blockNum), + zap.Int("transfersLen", len(allTransfers)), + zap.Int("fetchedTransfersLen", len(c.fetchedTransfers)), + ) } if c.blockNums != nil || len(blocks) == 0 || (c.blocksLimit > noBlockLimit && len(blocks) >= c.blocksLimit) { - log.Debug("loadTransfers breaking loop on block limits reached or 0 blocks", "chain", c.chainClient.NetworkID(), - "address", c.address, "limit", c.blocksLimit, "blocks", len(blocks)) + logutils.ZapLogger().Debug("loadTransfers breaking loop on block limits reached or 0 blocks", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Int("limit", c.blocksLimit), + zap.Int("blocks", len(blocks)), + ) break } } - log.Debug("end transfersCommand", "chain", c.chainClient.NetworkID(), "address", c.address, - "blocks.len", len(c.blockNums), "transfers.len", len(c.fetchedTransfers), "in", time.Since(startTs)) + logutils.ZapLogger().Debug("end transfersCommand", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("address", c.address), + zap.Int("blocks.len", len(c.blockNums)), + zap.Int("transfers.len", len(c.fetchedTransfers)), + zap.Duration("in", time.Since(startTs)), + ) return nil } @@ -295,7 +349,7 @@ func (c *transfersCommand) saveAndConfirmPending(allTransfers []Transfer, blockN if resErr == nil { commitErr := tx.Commit() if commitErr != nil { - log.Error("failed to commit", "error", commitErr) + logutils.ZapLogger().Error("failed to commit", zap.Error(commitErr)) } for _, notify := range notifyFunctions { notify() @@ -303,14 +357,14 @@ func (c *transfersCommand) saveAndConfirmPending(allTransfers []Transfer, blockN } else { rollbackErr := tx.Rollback() if rollbackErr != nil { - 
log.Error("failed to rollback", "error", rollbackErr) + logutils.ZapLogger().Error("failed to rollback", zap.Error(rollbackErr)) } } }() resErr = saveTransfersMarkBlocksLoaded(tx, c.chainClient.NetworkID(), c.address, allTransfers, []*big.Int{blockNum}) if resErr != nil { - log.Error("SaveTransfers error", "error", resErr) + logutils.ZapLogger().Error("SaveTransfers error", zap.Error(resErr)) } return resErr @@ -321,7 +375,7 @@ func externalTransactionOrError(err error, mTID int64) bool { // External transaction downloaded, ignore it return true } else if err != nil { - log.Warn("GetOwnedMultiTransactionID", "error", err) + logutils.ZapLogger().Warn("GetOwnedMultiTransactionID", zap.Error(err)) return true } else if mTID <= 0 { // Existing external transaction, ignore it @@ -350,7 +404,7 @@ func (c *transfersCommand) confirmPendingTransactions(tx *sql.Tx, allTransfers [ mTID = w_common.NewAndSet(existingMTID) } } else if err != nil { - log.Warn("GetOwnedPendingStatus", "error", err) + logutils.ZapLogger().Warn("GetOwnedPendingStatus", zap.Error(err)) continue } @@ -360,7 +414,7 @@ func (c *transfersCommand) confirmPendingTransactions(tx *sql.Tx, allTransfers [ if txType != nil && *txType == transactions.WalletTransfer { notify, err := c.pendingTxManager.DeleteBySQLTx(tx, chainID, txHash) if err != nil && err != transactions.ErrStillPending { - log.Error("DeleteBySqlTx error", "error", err) + logutils.ZapLogger().Error("DeleteBySqlTx error", zap.Error(err)) } notifyFunctions = append(notifyFunctions, notify) } @@ -571,7 +625,10 @@ func loadTransfers(ctx context.Context, blockDAO *BlockDAO, db *Database, transactionManager *TransactionManager, pendingTxManager *transactions.PendingTxTracker, tokenManager *token.Manager, feed *event.Feed) error { - log.Debug("loadTransfers start", "chain", chainClient.NetworkID(), "limit", blocksLimitPerAccount) + logutils.ZapLogger().Debug("loadTransfers start", + zap.Uint64("chain", chainClient.NetworkID()), + zap.Int("limit", blocksLimitPerAccount), + ) start := time.Now() group := async.NewGroup(ctx) @@ -600,9 +657,15 @@ func loadTransfers(ctx context.Context, blockDAO *BlockDAO, db *Database, select { case <-ctx.Done(): - log.Debug("loadTransfers cancelled", "chain", chainClient.NetworkID(), "error", ctx.Err()) + logutils.ZapLogger().Debug("loadTransfers cancelled", + zap.Uint64("chain", chainClient.NetworkID()), + zap.Error(ctx.Err()), + ) case <-group.WaitAsync(): - log.Debug("loadTransfers finished for account", "in", time.Since(start), "chain", chainClient.NetworkID()) + logutils.ZapLogger().Debug("loadTransfers finished for account", + zap.Duration("in", time.Since(start)), + zap.Uint64("chain", chainClient.NetworkID()), + ) } return nil } diff --git a/services/wallet/transfer/commands_sequential.go b/services/wallet/transfer/commands_sequential.go index 0ae61eaa77c..b9e9893c6f8 100644 --- a/services/wallet/transfer/commands_sequential.go +++ b/services/wallet/transfer/commands_sequential.go @@ -6,14 +6,16 @@ import ( "sync/atomic" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" "github.com/status-im/status-go/contracts" nodetypes "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" 
"github.com/status-im/status-go/rpc/chain" "github.com/status-im/status-go/rpc/chain/rpclimiter" @@ -63,7 +65,7 @@ var requestTimeout = 20 * time.Second func (c *findNewBlocksCommand) detectTransfers(parent context.Context, accounts []common.Address) (*big.Int, []common.Address, error) { bc, err := c.contractMaker.NewBalanceChecker(c.chainClient.NetworkID()) if err != nil { - log.Error("findNewBlocksCommand error creating balance checker", "error", err, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand error creating balance checker", zap.Uint64("chain", c.chainClient.NetworkID()), zap.Error(err)) return nil, nil, err } @@ -78,13 +80,13 @@ func (c *findNewBlocksCommand) detectTransfers(parent context.Context, accounts tokenAddresses = append(tokenAddresses, token.Address) } } - log.Debug("findNewBlocksCommand detectTransfers", "cnt", len(tokenAddresses)) + logutils.ZapLogger().Debug("findNewBlocksCommand detectTransfers", zap.Int("cnt", len(tokenAddresses))) ctx, cancel := context.WithTimeout(parent, requestTimeout) defer cancel() blockNum, hashes, err := bc.BalancesHash(&bind.CallOpts{Context: ctx}, c.accounts, tokenAddresses) if err != nil { - log.Error("findNewBlocksCommand can't get balances hashes", "error", err) + logutils.ZapLogger().Error("findNewBlocksCommand can't get balances hashes", zap.Error(err)) return nil, nil, err } @@ -92,12 +94,21 @@ func (c *findNewBlocksCommand) detectTransfers(parent context.Context, accounts for idx, account := range accounts { blockRange, _, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account) if err != nil { - log.Error("findNewBlocksCommand can't get block range", "error", err, "account", account, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand can't get block range", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, nil, err } checkHash := common.BytesToHash(hashes[idx][:]) - log.Debug("findNewBlocksCommand comparing hashes", "account", account, "network", c.chainClient.NetworkID(), "old hash", blockRange.balanceCheckHash, "new hash", checkHash.String()) + logutils.ZapLogger().Debug("findNewBlocksCommand comparing hashes", + zap.Stringer("account", account), + zap.Uint64("network", c.chainClient.NetworkID()), + zap.String("old hash", blockRange.balanceCheckHash), + zap.Stringer("new hash", checkHash), + ) if checkHash.String() != blockRange.balanceCheckHash { addressesToCheck = append(addressesToCheck, account) } @@ -106,7 +117,11 @@ func (c *findNewBlocksCommand) detectTransfers(parent context.Context, accounts err = c.blockRangeDAO.upsertRange(c.chainClient.NetworkID(), account, blockRange) if err != nil { - log.Error("findNewBlocksCommand can't update balance check", "error", err, "account", account, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand can't update balance check", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, nil, err } } @@ -121,20 +136,31 @@ func (c *findNewBlocksCommand) detectNonceChange(parent context.Context, to *big blockRange, _, err := c.blockRangeDAO.getBlockRange(c.chainClient.NetworkID(), account) if err != nil { - log.Error("findNewBlocksCommand can't get block range", "error", err, "account", account, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand can't get block range", + zap.Stringer("account", 
account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, err } lastNonceInfo, ok := c.lastNonces[account] if !ok || lastNonceInfo.blockNumber.Cmp(blockRange.eth.LastKnown) != 0 { - log.Debug("Fetching old nonce", "at", blockRange.eth.LastKnown, "acc", account) + logutils.ZapLogger().Debug("Fetching old nonce", + zap.Stringer("at", blockRange.eth.LastKnown), + zap.Stringer("acc", account), + ) if blockRange.eth.LastKnown == nil { blockRange.eth.LastKnown = big.NewInt(0) oldNonce = new(int64) // At 0 block nonce is 0 } else { oldNonce, err = c.balanceCacher.NonceAt(parent, c.chainClient, account, blockRange.eth.LastKnown) if err != nil { - log.Error("findNewBlocksCommand can't get nonce", "error", err, "account", account, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand can't get nonce", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, err } } @@ -144,11 +170,20 @@ func (c *findNewBlocksCommand) detectNonceChange(parent context.Context, to *big newNonce, err := c.balanceCacher.NonceAt(parent, c.chainClient, account, to) if err != nil { - log.Error("findNewBlocksCommand can't get nonce", "error", err, "account", account, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand can't get nonce", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, err } - log.Debug("Comparing nonces", "oldNonce", *oldNonce, "newNonce", *newNonce, "to", to, "acc", account) + logutils.ZapLogger().Debug("Comparing nonces", + zap.Int64p("oldNonce", oldNonce), + zap.Int64p("newNonce", newNonce), + zap.Stringer("to", to), + zap.Stringer("acc", account), + ) if *newNonce != *oldNonce { addressesWithChange[account] = blockRange.eth.LastKnown @@ -188,7 +223,7 @@ func (c *findNewBlocksCommand) Run(parent context.Context) error { } if mnemonicWasNotShown { if acc.AddressWasNotShown { - log.Info("skip findNewBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account) + logutils.ZapLogger().Info("skip findNewBlocksCommand, mnemonic has not been shown and the address has not been shared yet", zap.Stringer("address", account)) continue } } @@ -205,27 +240,33 @@ func (c *findNewBlocksCommand) Run(parent context.Context) error { headNum, accountsWithDetectedChanges, err := c.detectTransfers(parent, accountsToCheck) if err != nil { - log.Error("findNewBlocksCommand error on transfer detection", "error", err, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand error on transfer detection", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return err } c.blockChainState.SetLastBlockNumber(c.chainClient.NetworkID(), headNum.Uint64()) if len(accountsWithDetectedChanges) != 0 { - log.Debug("findNewBlocksCommand detected accounts with changes, proceeding", "accounts", accountsWithDetectedChanges, "from", c.fromBlockNumber) + logutils.ZapLogger().Debug("findNewBlocksCommand detected accounts with changes", + zap.Stringers("accounts", accountsWithDetectedChanges), + zap.Stringer("from", c.fromBlockNumber), + ) err = c.findAndSaveEthBlocks(parent, c.fromBlockNumber, headNum, accountsToCheck) if err != nil { return err } } else if c.iteration%c.nonceCheckIntervalIterations == 0 && len(accountsWithOutsideTransfers) > 0 { - log.Debug("findNewBlocksCommand nonce check", "accounts", 
accountsWithOutsideTransfers) + logutils.ZapLogger().Debug("findNewBlocksCommand nonce check", zap.Stringers("accounts", accountsWithOutsideTransfers)) accountsWithNonceChanges, err := c.detectNonceChange(parent, headNum, accountsWithOutsideTransfers) if err != nil { return err } if len(accountsWithNonceChanges) > 0 { - log.Debug("findNewBlocksCommand detected nonce diff", "accounts", accountsWithNonceChanges) + logutils.ZapLogger().Debug("findNewBlocksCommand detected nonce diff", zap.Any("accounts", accountsWithNonceChanges)) for account, from := range accountsWithNonceChanges { err = c.findAndSaveEthBlocks(parent, from, headNum, []common.Address{account}) if err != nil { @@ -276,12 +317,18 @@ func (c *findNewBlocksCommand) findAndSaveEthBlocks(parent context.Context, from return err } if acc.AddressWasNotShown { - log.Info("skip findNewBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account) + logutils.ZapLogger().Info("skip findNewBlocksCommand, mnemonic has not been shown and the address has not been shared yet", zap.Stringer("address", account)) continue } } - log.Debug("start findNewBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", fromNum, "to", headNum) + logutils.ZapLogger().Debug("start findNewBlocksCommand", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Bool("noLimit", c.noLimit), + zap.Stringer("from", fromNum), + zap.Stringer("to", headNum), + ) headers, startBlockNum, err := c.findBlocksWithEthTransfers(parent, account, fromNum, headNum) if err != nil { @@ -289,9 +336,12 @@ func (c *findNewBlocksCommand) findAndSaveEthBlocks(parent context.Context, from } if len(headers) > 0 { - log.Debug("findNewBlocksCommand saving headers", "len", len(headers), "lastBlockNumber", headNum, - "balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), headNum), - "nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), headNum)) + logutils.ZapLogger().Debug("findNewBlocksCommand saving headers", + zap.Int("len", len(headers)), + zap.Stringer("lastBlockNumber", headNum), + zap.Stringer("balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), headNum)), + zap.Int64p("nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), headNum)), + ) err := c.db.SaveBlocks(c.chainClient.NetworkID(), headers) if err != nil { @@ -306,7 +356,13 @@ func (c *findNewBlocksCommand) findAndSaveEthBlocks(parent context.Context, from return err } - log.Debug("end findNewBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", fromNum, "to", headNum) + logutils.ZapLogger().Debug("end findNewBlocksCommand", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Bool("noLimit", c.noLimit), + zap.Stringer("from", fromNum), + zap.Stringer("to", headNum), + ) } return nil @@ -319,12 +375,20 @@ func (c *findNewBlocksCommand) findAndSaveTokenBlocks(parent context.Context, fr const incomingOnly = false erc20Headers, err := c.fastIndexErc20(parent, fromNum, headNum, incomingOnly) if err != nil { - log.Error("findNewBlocksCommand fastIndexErc20", "err", err, "account", c.accounts, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand fastIndexErc20", + zap.Stringers("account", c.accounts), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return err } if 
len(erc20Headers) > 0 { - log.Debug("findNewBlocksCommand saving headers", "len", len(erc20Headers), "from", fromNum, "to", headNum) + logutils.ZapLogger().Debug("findNewBlocksCommand saving headers", + zap.Int("len", len(erc20Headers)), + zap.Stringer("from", fromNum), + zap.Stringer("to", headNum), + ) // get not loaded headers from DB for all accs and blocks preLoadedTransactions, err := c.db.GetTransactionsToLoad(c.chainClient.NetworkID(), common.Address{}, nil) @@ -346,12 +410,16 @@ func (c *findNewBlocksCommand) findAndSaveTokenBlocks(parent context.Context, fr } func (c *findBlocksCommand) markTokenBlockRangeChecked(accounts []common.Address, from, to *big.Int) error { - log.Debug("markTokenBlockRangeChecked", "chain", c.chainClient.NetworkID(), "from", from.Uint64(), "to", to.Uint64()) + logutils.ZapLogger().Debug("markTokenBlockRangeChecked", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Uint64("from", from.Uint64()), + zap.Uint64("to", to.Uint64()), + ) for _, account := range accounts { err := c.blockRangeDAO.updateTokenRange(c.chainClient.NetworkID(), account, &BlockRange{FirstKnown: from, LastKnown: to}) if err != nil { - log.Error("findNewBlocksCommand upsertTokenRange", "error", err) + logutils.ZapLogger().Error("findNewBlocksCommand upsertTokenRange", zap.Error(err)) return err } } @@ -379,7 +447,13 @@ func filterNewPreloadedTransactions(erc20Headers []*DBHeader, preLoadedTransfers } func (c *findNewBlocksCommand) findBlocksWithEthTransfers(parent context.Context, account common.Address, fromOrig, toOrig *big.Int) (headers []*DBHeader, startBlockNum *big.Int, err error) { - log.Debug("start findNewBlocksCommand::findBlocksWithEthTransfers", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", c.fromBlockNumber, "to", c.toBlockNumber) + logutils.ZapLogger().Debug("start findNewBlocksCommand::findBlocksWithEthTransfers", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Bool("noLimit", c.noLimit), + zap.Stringer("from", c.fromBlockNumber), + zap.Stringer("to", c.toBlockNumber), + ) rangeSize := big.NewInt(int64(c.defaultNodeBlockChunkSize)) @@ -392,7 +466,10 @@ func (c *findNewBlocksCommand) findBlocksWithEthTransfers(parent context.Context for { if from.Cmp(to) == 0 { - log.Debug("findNewBlocksCommand empty range", "from", from, "to", to) + logutils.ZapLogger().Debug("findNewBlocksCommand empty range", + zap.Stringer("from", from), + zap.Stringer("to", to), + ) break } @@ -402,24 +479,40 @@ func (c *findNewBlocksCommand) findBlocksWithEthTransfers(parent context.Context var ethHeaders []*DBHeader newFromBlock, ethHeaders, startBlockNum, err = c.fastIndex(parent, account, c.balanceCacher, fromBlock, to) if err != nil { - log.Error("findNewBlocksCommand checkRange fastIndex", "err", err, "account", account, - "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findNewBlocksCommand checkRange fastIndex", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, nil, err } - log.Debug("findNewBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account, - "startBlock", startBlockNum, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit) + logutils.ZapLogger().Debug("findNewBlocksCommand checkRange", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("account", account), + zap.Stringer("startBlock", startBlockNum), + zap.Stringer("newFromBlock", 
newFromBlock.Number), + zap.Stringer("toBlockNumber", to), + zap.Bool("noLimit", c.noLimit), + ) headers = append(headers, ethHeaders...) if startBlockNum != nil && startBlockNum.Cmp(from) >= 0 { - log.Debug("Checked all ranges, stop execution", "startBlock", startBlockNum, "from", from, "to", to) + logutils.ZapLogger().Debug("Checked all ranges, stop execution", + zap.Stringer("startBlock", startBlockNum), + zap.Stringer("from", from), + zap.Stringer("to", to), + ) break } nextFrom, nextTo := nextRange(c.defaultNodeBlockChunkSize, newFromBlock.Number, fromOrig) if nextFrom.Cmp(from) == 0 && nextTo.Cmp(to) == 0 { - log.Debug("findNewBlocksCommand empty next range", "from", from, "to", to) + logutils.ZapLogger().Debug("findNewBlocksCommand empty next range", + zap.Stringer("from", from), + zap.Stringer("to", to), + ) break } @@ -427,7 +520,11 @@ func (c *findNewBlocksCommand) findBlocksWithEthTransfers(parent context.Context to = nextTo } - log.Debug("end findNewBlocksCommand::findBlocksWithEthTransfers", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit) + logutils.ZapLogger().Debug("end findNewBlocksCommand::findBlocksWithEthTransfers", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Bool("noLimit", c.noLimit), + ) return headers, startBlockNum, nil } @@ -539,7 +636,12 @@ func (c *findBlocksCommand) ERC20ScanByBalance(parent context.Context, account c } func (c *findBlocksCommand) checkERC20Tail(parent context.Context, account common.Address) ([]*DBHeader, error) { - log.Debug("checkERC20Tail", "account", account, "to block", c.startBlockNumber, "from", c.resFromBlock.Number) + logutils.ZapLogger().Debug( + "checkERC20Tail", + zap.Stringer("account", account), + zap.Stringer("to block", c.startBlockNumber), + zap.Stringer("from", c.resFromBlock.Number), + ) tokens, err := c.tokenManager.GetTokens(c.chainClient.NetworkID()) if err != nil { return nil, err @@ -597,7 +699,13 @@ func (c *findBlocksCommand) checkERC20Tail(parent context.Context, account commo } func (c *findBlocksCommand) Run(parent context.Context) (err error) { - log.Debug("start findBlocksCommand", "accounts", c.accounts, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "from", c.fromBlockNumber, "to", c.toBlockNumber) + logutils.ZapLogger().Debug("start findBlocksCommand", + zap.Any("accounts", c.accounts), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Bool("noLimit", c.noLimit), + zap.Stringer("from", c.fromBlockNumber), + zap.Stringer("to", c.toBlockNumber), + ) account := c.accounts[0] // For now this command supports only 1 account mnemonicWasNotShown, err := c.accountsDB.GetMnemonicWasNotShown() @@ -611,7 +719,7 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) { return err } if account.AddressWasNotShown { - log.Info("skip findBlocksCommand, mnemonic has not been shown and the address has not been shared yet", "address", account) + logutils.ZapLogger().Info("skip findBlocksCommand, mnemonic has not been shown and the address has not been shared yet", zap.Stringer("address", account.Address)) return nil } } @@ -626,7 +734,9 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) { for { if from.Cmp(to) == 0 { - log.Debug("findBlocksCommand empty range", "from", from, "to", to) + logutils.ZapLogger().Debug("findBlocksCommand empty range", + zap.Stringer("from", from), + zap.Stringer("to", to)) break } @@ -635,7 +745,11 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) { if 
c.fromBlockNumber.Cmp(zero) == 0 && c.startBlockNumber != nil && c.startBlockNumber.Cmp(zero) == 1 { headers, err = c.checkERC20Tail(parent, account) if err != nil { - log.Error("findBlocksCommand checkERC20Tail", "err", err, "account", account, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findBlocksCommand checkERC20Tail", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) break } } @@ -647,9 +761,12 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) { } if len(headers) > 0 { - log.Debug("findBlocksCommand saving headers", "len", len(headers), "lastBlockNumber", to, - "balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), to), - "nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), to)) + logutils.ZapLogger().Debug("findBlocksCommand saving headers", + zap.Int("len", len(headers)), + zap.Stringer("lastBlockNumber", to), + zap.Stringer("balance", c.balanceCacher.Cache().GetBalance(account, c.chainClient.NetworkID(), to)), + zap.Int64p("nonce", c.balanceCacher.Cache().GetNonce(account, c.chainClient.NetworkID(), to)), + ) err = c.db.SaveBlocks(c.chainClient.NetworkID(), headers) if err != nil { @@ -664,7 +781,10 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) { if err != nil { break } - log.Debug("findBlocksCommand reached first ETH transfer and checked erc20 tail", "chain", c.chainClient.NetworkID(), "account", account) + logutils.ZapLogger().Debug("findBlocksCommand reached first ETH transfer and checked erc20 tail", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("account", account), + ) break } @@ -680,20 +800,31 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) { // if we have found first ETH block and we have not reached the start of ETH history yet if c.startBlockNumber != nil && c.fromBlockNumber.Cmp(from) == -1 { - log.Debug("ERC20 tail should be checked", "initial from", c.fromBlockNumber, "actual from", from, "first ETH block", c.startBlockNumber) + logutils.ZapLogger().Debug("ERC20 tail should be checked", + zap.Stringer("initial from", c.fromBlockNumber), + zap.Stringer("actual from", from), + zap.Stringer("first ETH block", c.startBlockNumber), + ) c.reachedETHHistoryStart = true continue } if c.startBlockNumber != nil && c.startBlockNumber.Cmp(from) >= 0 { - log.Debug("Checked all ranges, stop execution", "startBlock", c.startBlockNumber, "from", from, "to", to) + logutils.ZapLogger().Debug("Checked all ranges, stop execution", + zap.Stringer("startBlock", c.startBlockNumber), + zap.Stringer("from", from), + zap.Stringer("to", to), + ) break } nextFrom, nextTo := nextRange(c.defaultNodeBlockChunkSize, c.resFromBlock.Number, c.fromBlockNumber) if nextFrom.Cmp(from) == 0 && nextTo.Cmp(to) == 0 { - log.Debug("findBlocksCommand empty next range", "from", from, "to", to) + logutils.ZapLogger().Debug("findBlocksCommand empty next range", + zap.Stringer("from", from), + zap.Stringer("to", to), + ) break } @@ -701,7 +832,12 @@ func (c *findBlocksCommand) Run(parent context.Context) (err error) { to = nextTo } - log.Debug("end findBlocksCommand", "account", account, "chain", c.chainClient.NetworkID(), "noLimit", c.noLimit, "err", err) + logutils.ZapLogger().Debug("end findBlocksCommand", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Bool("noLimit", c.noLimit), + zap.Error(err), + ) return err } @@ -711,12 +847,17 @@ func (c 
*findBlocksCommand) blocksFound(headers []*DBHeader) { } func (c *findBlocksCommand) markEthBlockRangeChecked(account common.Address, blockRange *BlockRange) error { - log.Debug("upsert block range", "Start", blockRange.Start, "FirstKnown", blockRange.FirstKnown, "LastKnown", blockRange.LastKnown, - "chain", c.chainClient.NetworkID(), "account", account) + logutils.ZapLogger().Debug("upsert block range", + zap.Stringer("Start", blockRange.Start), + zap.Stringer("FirstKnown", blockRange.FirstKnown), + zap.Stringer("LastKnown", blockRange.LastKnown), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringer("account", account), + ) err := c.blockRangeDAO.upsertEthRange(c.chainClient.NetworkID(), account, blockRange) if err != nil { - log.Error("findBlocksCommand upsertRange", "error", err) + logutils.ZapLogger().Error("findBlocksCommand upsertRange", zap.Error(err)) return err } @@ -731,18 +872,31 @@ func (c *findBlocksCommand) checkRange(parent context.Context, from *big.Int, to newFromBlock, ethHeaders, startBlock, err := c.fastIndex(parent, account, c.balanceCacher, fromBlock, to) if err != nil { - log.Error("findBlocksCommand checkRange fastIndex", "err", err, "account", account, - "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findBlocksCommand checkRange fastIndex", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, err } - log.Debug("findBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account, - "startBlock", startBlock, "newFromBlock", newFromBlock.Number, "toBlockNumber", to, "noLimit", c.noLimit) + logutils.ZapLogger().Debug("findBlocksCommand checkRange", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("account", account), + zap.Stringer("startBlock", startBlock), + zap.Stringer("newFromBlock", newFromBlock.Number), + zap.Stringer("toBlockNumber", to), + zap.Bool("noLimit", c.noLimit), + ) // There could be incoming ERC20 transfers which don't change the balance // and nonce of ETH account, so we keep looking for them erc20Headers, err := c.fastIndexErc20(parent, newFromBlock.Number, to, false) if err != nil { - log.Error("findBlocksCommand checkRange fastIndexErc20", "err", err, "account", account, "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Error("findBlocksCommand checkRange fastIndexErc20", + zap.Stringer("account", account), + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(err), + ) return nil, err } @@ -755,9 +909,14 @@ func (c *findBlocksCommand) checkRange(parent context.Context, from *big.Int, to c.resFromBlock = newFromBlock c.startBlockNumber = startBlock - log.Debug("end findBlocksCommand checkRange", "chainID", c.chainClient.NetworkID(), "account", account, - "c.startBlock", c.startBlockNumber, "newFromBlock", newFromBlock.Number, - "toBlockNumber", to, "c.resFromBlock", c.resFromBlock.Number) + logutils.ZapLogger().Debug("end findBlocksCommand checkRange", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("account", account), + zap.Stringer("c.startBlock", c.startBlockNumber), + zap.Stringer("newFromBlock", newFromBlock.Number), + zap.Stringer("toBlockNumber", to), + zap.Stringer("c.resFromBlock", c.resFromBlock.Number), + ) return } @@ -767,8 +926,12 @@ func loadBlockRangeInfo(chainID uint64, account common.Address, blockDAO BlockRa blockRange, _, err := blockDAO.getBlockRange(chainID, account) if err != nil { - log.Error("failed to load block ranges from database", "chain", 
chainID, "account", account, - "error", err) + logutils.ZapLogger().Error( + "failed to load block ranges from database", + zap.Uint64("chain", chainID), + zap.Stringer("account", account), + zap.Error(err), + ) return nil, err } @@ -792,7 +955,7 @@ func areAllHistoryBlocksLoadedForAddress(blockRangeDAO BlockRangeDAOer, chainID blockRange, _, err := blockRangeDAO.getBlockRange(chainID, address) if err != nil { - log.Error("findBlocksCommand getBlockRange", "error", err) + logutils.ZapLogger().Error("findBlocksCommand getBlockRange", zap.Error(err)) return false, err } @@ -805,8 +968,12 @@ func (c *findBlocksCommand) fastIndex(ctx context.Context, account common.Addres fromBlock *Block, toBlockNumber *big.Int) (resultingFrom *Block, headers []*DBHeader, startBlock *big.Int, err error) { - log.Debug("fast index started", "chainID", c.chainClient.NetworkID(), "account", account, - "from", fromBlock.Number, "to", toBlockNumber) + logutils.ZapLogger().Debug("fast index started", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("account", account), + zap.Stringer("from", fromBlock.Number), + zap.Stringer("to", toBlockNumber), + ) start := time.Now() group := async.NewGroup(ctx) @@ -826,7 +993,7 @@ func (c *findBlocksCommand) fastIndex(ctx context.Context, account common.Addres select { case <-ctx.Done(): err = ctx.Err() - log.Debug("fast indexer ctx Done", "error", err) + logutils.ZapLogger().Debug("fast indexer ctx Done", zap.Error(err)) return case <-group.WaitAsync(): if command.error != nil { @@ -836,8 +1003,14 @@ func (c *findBlocksCommand) fastIndex(ctx context.Context, account common.Addres resultingFrom = &Block{Number: command.resultingFrom} headers = command.foundHeaders startBlock = command.startBlock - log.Debug("fast indexer finished", "chainID", c.chainClient.NetworkID(), "account", account, "in", time.Since(start), - "startBlock", command.startBlock, "resultingFrom", resultingFrom.Number, "headers", len(headers)) + logutils.ZapLogger().Debug("fast indexer finished", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("account", account), + zap.Duration("in", time.Since(start)), + zap.Stringer("startBlock", command.startBlock), + zap.Stringer("resultingFrom", resultingFrom.Number), + zap.Int("headers", len(headers)), + ) return } } @@ -865,8 +1038,11 @@ func (c *findBlocksCommand) fastIndexErc20(ctx context.Context, fromBlockNumber return nil, ctx.Err() case <-group.WaitAsync(): headers := erc20.foundHeaders - log.Debug("fast indexer Erc20 finished", "chainID", c.chainClient.NetworkID(), - "in", time.Since(start), "headers", len(headers)) + logutils.ZapLogger().Debug("fast indexer Erc20 finished", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Duration("in", time.Since(start)), + zap.Int("headers", len(headers)), + ) return headers, nil } } @@ -880,15 +1056,21 @@ func (c *loadBlocksAndTransfersCommand) startTransfersLoop(ctx context.Context) c.decLoops() }() - log.Debug("loadTransfersLoop start", "chain", c.chainClient.NetworkID()) + logutils.ZapLogger().Debug("loadTransfersLoop start", zap.Uint64("chain", c.chainClient.NetworkID())) for { select { case <-ctx.Done(): - log.Debug("startTransfersLoop done", "chain", c.chainClient.NetworkID(), "error", ctx.Err()) + logutils.ZapLogger().Debug("startTransfersLoop done", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Error(ctx.Err()), + ) return case dbHeaders := <-c.blocksLoadedCh: - log.Debug("loadTransfersOnDemand transfers received", "chain", c.chainClient.NetworkID(), "headers", 
len(dbHeaders)) + logutils.ZapLogger().Debug("loadTransfersOnDemand transfers received", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Int("headers", len(dbHeaders)), + ) blocksByAddress := map[common.Address][]*big.Int{} // iterate over headers and group them by address @@ -967,7 +1149,10 @@ func (c *loadBlocksAndTransfersCommand) isStarted() bool { } func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) (err error) { - log.Debug("start load all transfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts) + logutils.ZapLogger().Debug("start load all transfers command", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Any("accounts", c.accounts), + ) // Finite processes (to be restarted on error, but stopped on success or context cancel): // fetching transfers for loaded blocks @@ -1015,7 +1200,7 @@ func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) (err error) // It will start loadTransfersCommand which will run until all transfers from DB are loaded or any one failed to load err = c.startFetchingTransfersForLoadedBlocks(finiteGroup) if err != nil { - log.Error("loadBlocksAndTransfersCommand fetchTransfersForLoadedBlocks", "error", err) + logutils.ZapLogger().Error("loadBlocksAndTransfersCommand fetchTransfersForLoadedBlocks", zap.Error(err)) return err } @@ -1027,16 +1212,26 @@ func (c *loadBlocksAndTransfersCommand) Run(parent context.Context) (err error) // It will start findBlocksCommands which will run until success when all blocks are loaded err = c.fetchHistoryBlocks(finiteGroup, c.accounts, fromNum, headNum, c.blocksLoadedCh) if err != nil { - log.Error("loadBlocksAndTransfersCommand fetchHistoryBlocks", "error", err) + logutils.ZapLogger().Error("loadBlocksAndTransfersCommand fetchHistoryBlocks", zap.Error(err)) return err } select { case <-ctx.Done(): - log.Debug("loadBlocksAndTransfers command cancelled", "chain", c.chainClient.NetworkID(), "accounts", c.accounts, "error", ctx.Err()) + logutils.ZapLogger().Debug("loadBlocksAndTransfers command cancelled", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringers("accounts", c.accounts), + zap.Error(ctx.Err()), + ) case <-finiteGroup.WaitAsync(): err = finiteGroup.Error() // if there was an error, rerun the command - log.Debug("end loadBlocksAndTransfers command", "chain", c.chainClient.NetworkID(), "accounts", c.accounts, "error", err, "group", finiteGroup.Name()) + logutils.ZapLogger().Debug( + "end loadBlocksAndTransfers command", + zap.Uint64("chain", c.chainClient.NetworkID()), + zap.Stringers("accounts", c.accounts), + zap.String("group", finiteGroup.Name()), + zap.Error(err), + ) } return err @@ -1071,19 +1266,22 @@ func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocks(group *async.AtomicGr } func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocksForAccount(group *async.AtomicGroup, account common.Address, fromNum, toNum *big.Int, blocksLoadedCh chan []*DBHeader) error { - - log.Debug("fetchHistoryBlocks start", "chainID", c.chainClient.NetworkID(), "account", account, "omit", c.omitHistory) + logutils.ZapLogger().Debug("fetchHistoryBlocks start", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("account", account), + zap.Bool("omit", c.omitHistory), + ) if c.omitHistory { blockRange := &ethTokensBlockRanges{eth: &BlockRange{nil, big.NewInt(0), toNum}, tokens: &BlockRange{nil, big.NewInt(0), toNum}} err := c.blockRangeDAO.upsertRange(c.chainClient.NetworkID(), account, blockRange) - log.Error("fetchHistoryBlocks upsertRange", 
"error", err) + logutils.ZapLogger().Error("fetchHistoryBlocks upsertRange", zap.Error(err)) return err } blockRange, err := loadBlockRangeInfo(c.chainClient.NetworkID(), account, c.blockRangeDAO) if err != nil { - log.Error("fetchHistoryBlocks loadBlockRangeInfo", "error", err) + logutils.ZapLogger().Error("fetchHistoryBlocks loadBlockRangeInfo", zap.Error(err)) return err } @@ -1132,7 +1330,11 @@ func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocksForAccount(group *asyn } for _, rangeItem := range ranges { - log.Debug("range item", "r", rangeItem, "n", c.chainClient.NetworkID(), "a", account) + logutils.ZapLogger().Debug("range item", + zap.Stringers("r", rangeItem), + zap.Uint64("n", c.chainClient.NetworkID()), + zap.Stringer("a", account), + ) fbc := &findBlocksCommand{ accounts: []common.Address{account}, @@ -1157,7 +1359,10 @@ func (c *loadBlocksAndTransfersCommand) fetchHistoryBlocksForAccount(group *asyn } func (c *loadBlocksAndTransfersCommand) startFetchingNewBlocks(ctx context.Context, addresses []common.Address, fromNum *big.Int, blocksLoadedCh chan<- []*DBHeader) { - log.Debug("startFetchingNewBlocks start", "chainID", c.chainClient.NetworkID(), "accounts", addresses) + logutils.ZapLogger().Debug("startFetchingNewBlocks start", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringers("accounts", addresses), + ) c.incLoops() go func() { @@ -1192,7 +1397,11 @@ func (c *loadBlocksAndTransfersCommand) startFetchingNewBlocks(ctx context.Conte // No need to wait for the group since it is infinite <-ctx.Done() - log.Debug("startFetchingNewBlocks end", "chainID", c.chainClient.NetworkID(), "accounts", addresses, "error", ctx.Err()) + logutils.ZapLogger().Debug("startFetchingNewBlocks end", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringers("accounts", addresses), + zap.Error(ctx.Err()), + ) }() } @@ -1201,12 +1410,15 @@ func (c *loadBlocksAndTransfersCommand) getBlocksToLoad() (map[common.Address][] for _, account := range c.accounts { blocks, err := c.blockDAO.GetBlocksToLoadByAddress(c.chainClient.NetworkID(), account, numberOfBlocksCheckedPerIteration) if err != nil { - log.Error("loadBlocksAndTransfersCommand GetBlocksToLoadByAddress", "error", err) + logutils.ZapLogger().Error("loadBlocksAndTransfersCommand GetBlocksToLoadByAddress", zap.Error(err)) return nil, err } if len(blocks) == 0 { - log.Debug("fetchTransfers no blocks to load", "chainID", c.chainClient.NetworkID(), "account", account) + logutils.ZapLogger().Debug("fetchTransfers no blocks to load", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringer("account", account), + ) continue } @@ -1214,15 +1426,17 @@ func (c *loadBlocksAndTransfersCommand) getBlocksToLoad() (map[common.Address][] } if len(blocksMap) == 0 { - log.Debug("fetchTransfers no blocks to load", "chainID", c.chainClient.NetworkID()) + logutils.ZapLogger().Debug("fetchTransfers no blocks to load", zap.Uint64("chainID", c.chainClient.NetworkID())) } return blocksMap, nil } func (c *loadBlocksAndTransfersCommand) startFetchingTransfersForLoadedBlocks(group *async.AtomicGroup) error { - - log.Debug("fetchTransfers start", "chainID", c.chainClient.NetworkID(), "accounts", c.accounts) + logutils.ZapLogger().Debug("fetchTransfers start", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringers("accounts", c.accounts), + ) blocksMap, err := c.getBlocksToLoad() if err != nil { @@ -1244,7 +1458,10 @@ func (c *loadBlocksAndTransfersCommand) startFetchingTransfersForLoadedBlocks(gr } 
group.Add(txCommand.Command()) - log.Debug("fetchTransfers end", "chainID", c.chainClient.NetworkID(), "accounts", c.accounts) + logutils.ZapLogger().Debug("fetchTransfers end", + zap.Uint64("chainID", c.chainClient.NetworkID()), + zap.Stringers("accounts", c.accounts), + ) }() return nil @@ -1263,14 +1480,14 @@ func (c *loadBlocksAndTransfersCommand) notifyHistoryReady(account common.Addres func (c *loadBlocksAndTransfersCommand) areAllTransfersLoaded(account common.Address) (bool, error) { allBlocksLoaded, err := areAllHistoryBlocksLoadedForAddress(c.blockRangeDAO, c.chainClient.NetworkID(), account) if err != nil { - log.Error("loadBlockAndTransfersCommand allHistoryBlocksLoaded", "error", err) + logutils.ZapLogger().Error("loadBlockAndTransfersCommand allHistoryBlocksLoaded", zap.Error(err)) return false, err } if allBlocksLoaded { headers, err := c.blockDAO.GetBlocksToLoadByAddress(c.chainClient.NetworkID(), account, 1) if err != nil { - log.Error("loadBlocksAndTransfersCommand GetFirstSavedBlock", "error", err) + logutils.ZapLogger().Error("loadBlocksAndTransfersCommand GetFirstSavedBlock", zap.Error(err)) return false, err } @@ -1289,7 +1506,7 @@ func getHeadBlockNumber(parent context.Context, chainClient chain.ClientInterfac head, err := chainClient.HeaderByNumber(ctx, nil) cancel() if err != nil { - log.Error("getHeadBlockNumber", "error", err) + logutils.ZapLogger().Error("getHeadBlockNumber", zap.Error(err)) return nil, err } @@ -1297,7 +1514,10 @@ func getHeadBlockNumber(parent context.Context, chainClient chain.ClientInterfac } func nextRange(maxRangeSize int, prevFrom, zeroBlockNumber *big.Int) (*big.Int, *big.Int) { - log.Debug("next range start", "from", prevFrom, "zeroBlockNumber", zeroBlockNumber) + logutils.ZapLogger().Debug("next range start", + zap.Stringer("from", prevFrom), + zap.Stringer("zeroBlockNumber", zeroBlockNumber), + ) rangeSize := big.NewInt(int64(maxRangeSize)) @@ -1307,7 +1527,11 @@ func nextRange(maxRangeSize int, prevFrom, zeroBlockNumber *big.Int) (*big.Int, from = new(big.Int).Set(zeroBlockNumber) } - log.Debug("next range end", "from", from, "to", to, "zeroBlockNumber", zeroBlockNumber) + logutils.ZapLogger().Debug("next range end", + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Stringer("zeroBlockNumber", zeroBlockNumber), + ) return from, to } @@ -1323,12 +1547,19 @@ func createChainClientWithLimiter(client chain.ClientInterface, account common.A // Check if limit is already reached, then skip the command if allow, err := limiter.Allow(accountTag); !allow { - log.Info("fetchHistoryBlocksForAccount limit reached", "account", account, "chain", chainClient.NetworkID(), "error", err) + logutils.ZapLogger().Info("fetchHistoryBlocksForAccount limit reached", + zap.Stringer("account", account), + zap.Uint64("chain", chainClient.NetworkID()), + zap.Error(err), + ) return nil, err } if allow, err := limiter.Allow(transferHistoryTag); !allow { - log.Info("fetchHistoryBlocksForAccount common limit reached", "chain", chainClient.NetworkID(), "error", err) + logutils.ZapLogger().Info("fetchHistoryBlocksForAccount common limit reached", + zap.Uint64("chain", chainClient.NetworkID()), + zap.Error(err), + ) return nil, err } @@ -1336,7 +1567,10 @@ func createChainClientWithLimiter(client chain.ClientInterface, account common.A if limit == nil { err := limiter.SetLimit(accountTag, transferHistoryLimitPerAccount, rpclimiter.LimitInfinitely) if err != nil { - log.Error("fetchHistoryBlocksForAccount SetLimit", "error", err, "accountTag", accountTag) + 
logutils.ZapLogger().Error("fetchHistoryBlocksForAccount SetLimit", + zap.String("accountTag", accountTag), + zap.Error(err), + ) } } @@ -1344,7 +1578,10 @@ func createChainClientWithLimiter(client chain.ClientInterface, account common.A // after app restart if the limit was reached. Currently there is no way to reset the limit from UI err := limiter.SetLimit(transferHistoryTag, transferHistoryLimit, transferHistoryLimitPeriod) if err != nil { - log.Error("fetchHistoryBlocksForAccount SetLimit", "error", err, "groupTag", transferHistoryTag) + logutils.ZapLogger().Error("fetchHistoryBlocksForAccount SetLimit", + zap.String("groupTag", transferHistoryTag), + zap.Error(err), + ) } chainClient.SetLimiter(limiter) diff --git a/services/wallet/transfer/concurrent.go b/services/wallet/transfer/concurrent.go index b60658e8735..7af2aabce2e 100644 --- a/services/wallet/transfer/concurrent.go +++ b/services/wallet/transfer/concurrent.go @@ -8,9 +8,10 @@ import ( "time" "github.com/pkg/errors" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/async" "github.com/status-im/status-go/services/wallet/balance" ) @@ -98,7 +99,11 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca account common.Address, ranges [][]*big.Int, threadLimit uint32, startBlock *big.Int) ( resRanges [][]*big.Int, headers []*DBHeader, newStartBlock *big.Int, err error) { - log.Debug("start checkRanges", "account", account.Hex(), "ranges len", len(ranges), "startBlock", startBlock) + logutils.ZapLogger().Debug("start checkRanges", + zap.Stringer("account", account), + zap.Int("ranges len", len(ranges)), + zap.Stringer("startBlock", startBlock), + ) ctx, cancel := context.WithTimeout(parent, 30*time.Second) defer cancel() @@ -111,25 +116,40 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca from := blocksRange[0] to := blocksRange[1] - log.Debug("check block range", "from", from, "to", to) + logutils.ZapLogger().Debug("check block range", + zap.Stringer("from", from), + zap.Stringer("to", to), + ) if startBlock != nil { if to.Cmp(newStartBlock) <= 0 { - log.Debug("'to' block is less than 'start' block", "to", to, "startBlock", startBlock) + logutils.ZapLogger().Debug("'to' block is less than 'start' block", + zap.Stringer("to", to), + zap.Stringer("startBlock", startBlock), + ) continue } } c.Add(func(ctx context.Context) error { if from.Cmp(to) >= 0 { - log.Debug("'from' block is greater than or equal to 'to' block", "from", from, "to", to) + logutils.ZapLogger().Debug("'from' block is greater than or equal to 'to' block", + zap.Stringer("from", from), + zap.Stringer("to", to), + ) return nil } - log.Debug("eth transfers comparing blocks", "from", from, "to", to) + logutils.ZapLogger().Debug("eth transfers comparing blocks", + zap.Stringer("from", from), + zap.Stringer("to", to), + ) if startBlock != nil { if to.Cmp(startBlock) <= 0 { - log.Debug("'to' block is less than 'start' block", "to", to, "startBlock", startBlock) + logutils.ZapLogger().Debug("'to' block is less than 'start' block", + zap.Stringer("to", to), + zap.Stringer("startBlock", startBlock), + ) return nil } } @@ -143,7 +163,12 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca return err } if lb.Cmp(hb) == 0 { - log.Debug("balances are equal", "from", from, "to", to, "lb", lb, "hb", hb) + logutils.ZapLogger().Debug("balances are 
equal", + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Stringer("lb", lb), + zap.Stringer("hb", hb), + ) hn, err := cache.NonceAt(ctx, client, account, to) if err != nil { @@ -151,12 +176,12 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca return err } // if nonce is zero in a newer block then there is no need to check an older one if *hn == 0 { - log.Debug("zero nonce", "to", to) + logutils.ZapLogger().Debug("zero nonce", zap.Stringer("to", to)) if hb.Cmp(big.NewInt(0)) == 0 { // balance is 0, nonce is 0, we stop checking further, that will be the start block (even though the real one can be a later one) if startBlock != nil { if to.Cmp(newStartBlock) > 0 { - log.Debug("found possible start block, we should not search back", "block", to) + logutils.ZapLogger().Debug("found possible start block, we should not search back", zap.Stringer("block", to)) newStartBlock = to // increase newStartBlock if we found a new higher block } } else { @@ -172,7 +197,12 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca return err } if *ln == *hn { - log.Debug("transaction count is also equal", "from", from, "to", to, "ln", *ln, "hn", *hn) + logutils.ZapLogger().Debug("transaction count is also equal", + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Int64p("ln", ln), + zap.Int64p("hn", hn), + ) return nil } } @@ -190,7 +220,11 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca if err != nil { return err } - log.Debug("balances are not equal", "from", from, "mid", mid, "to", to) + logutils.ZapLogger().Debug("balances are not equal", + zap.Stringer("from", from), + zap.Stringer("mid", mid), + zap.Stringer("to", to), + ) c.PushRange([]*big.Int{mid, to}) c.PushRange([]*big.Int{from, mid}) @@ -208,7 +242,10 @@ func checkRangesWithStartBlock(parent context.Context, client balance.Reader, ca return nil, nil, nil, errors.Wrap(c.Error(), "failed to download transfers using concurrent downloader") } - log.Debug("end checkRanges", "account", account.Hex(), "newStartBlock", newStartBlock) + logutils.ZapLogger().Debug("end checkRanges", + zap.Stringer("account", account), + zap.Stringer("newStartBlock", newStartBlock), + ) return c.GetRanges(), c.GetHeaders(), newStartBlock, nil } @@ -222,7 +259,10 @@ func findBlocksWithEthTransfers(parent context.Context, client balance.Reader, c var lvl = 1 for len(ranges) > 0 && lvl <= 30 { - log.Debug("check blocks ranges", "lvl", lvl, "ranges len", len(ranges)) + logutils.ZapLogger().Debug("check blocks ranges", + zap.Int("lvl", lvl), + zap.Int("ranges len", len(ranges)), + ) lvl++ // Check if there are transfers in blocks in ranges. To do that, nonce and balance are checked // the block ranges that have transfers are returned @@ -236,7 +276,11 @@ func findBlocksWithEthTransfers(parent context.Context, client balance.Reader, c headers = append(headers, newHeaders...) 
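(Editorial aside: the checkRanges logging above traces a balance-bisection scan. If balance and nonce agree at both ends of a block range, the range is assumed to contain no ETH transfers; otherwise it is split at the midpoint and both halves are pushed back for re-checking. Below is a simplified sequential sketch of that idea; the BalanceReader interface is a hypothetical stand-in for the balance.Reader/cache pair used in the patched code, which additionally checks nonces and processes ranges concurrently.)

package transferscan

import (
	"context"
	"math/big"
)

// BalanceReader is a hypothetical stand-in for the balance reader used above.
type BalanceReader interface {
	BalanceAt(ctx context.Context, block *big.Int) (*big.Int, error)
}

// findRanges bisects [from, to] and returns the sub-ranges that may contain
// ETH transfers, judging purely by balance differences at the endpoints.
func findRanges(ctx context.Context, r BalanceReader, from, to *big.Int) ([][2]*big.Int, error) {
	if from.Cmp(to) >= 0 {
		return nil, nil
	}
	lb, err := r.BalanceAt(ctx, from)
	if err != nil {
		return nil, err
	}
	hb, err := r.BalanceAt(ctx, to)
	if err != nil {
		return nil, err
	}
	if lb.Cmp(hb) == 0 {
		// Equal balances: assume no transfers. The real code also compares
		// nonces here, since a transfer can leave the balance unchanged.
		return nil, nil
	}
	span := new(big.Int).Sub(to, from)
	if span.Cmp(big.NewInt(1)) == 0 {
		return [][2]*big.Int{{from, to}}, nil // narrowed down to a single block
	}
	mid := new(big.Int).Add(from, new(big.Int).Rsh(span, 1))
	left, err := findRanges(ctx, r, from, mid)
	if err != nil {
		return nil, err
	}
	right, err := findRanges(ctx, r, mid, to)
	if err != nil {
		return nil, err
	}
	return append(left, right...), nil
}
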
if len(newRanges) > 0 { - log.Debug("found new ranges", "account", account, "lvl", lvl, "new ranges len", len(newRanges)) + logutils.ZapLogger().Debug("found new ranges", + zap.Stringer("account", account), + zap.Int("lvl", lvl), + zap.Int("new ranges len", len(newRanges)), + ) } if len(newRanges) > 60 && !noLimit { sort.SliceStable(newRanges, func(i, j int) bool { diff --git a/services/wallet/transfer/controller.go b/services/wallet/transfer/controller.go index 4c1a56f83ca..f07f16706a2 100644 --- a/services/wallet/transfer/controller.go +++ b/services/wallet/transfer/controller.go @@ -6,13 +6,14 @@ import ( "fmt" "math/big" + "go.uber.org/zap" "golang.org/x/exp/slices" // since 1.21, this is in the standard library "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" statusaccounts "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/rpc/chain/rpclimiter" @@ -154,12 +155,12 @@ func (c *Controller) onAccountsChanged(changedAddresses []common.Address, eventT } if c.reactor == nil { - log.Warn("reactor is not initialized") + logutils.ZapLogger().Warn("reactor is not initialized") return } if eventType == accountsevent.EventTypeAdded || eventType == accountsevent.EventTypeRemoved { - log.Debug("list of accounts was changed from a previous version. reactor will be restarted", "new", currentAddresses) + logutils.ZapLogger().Debug("list of accounts was changed from a previous version. reactor will be restarted", zap.Stringers("new", currentAddresses)) chainClients, err := c.rpcClient.EthClients(chainIDs) if err != nil { @@ -168,7 +169,7 @@ func (c *Controller) onAccountsChanged(changedAddresses []common.Address, eventT err = c.reactor.restart(chainClients, currentAddresses) if err != nil { - log.Error("failed to restart reactor with new accounts", "error", err) + logutils.ZapLogger().Error("failed to restart reactor with new accounts", zap.Error(err)) } } } @@ -217,7 +218,7 @@ func (c *Controller) GetTransfersByAddress(ctx context.Context, chainID uint64, rst, err := c.reactor.getTransfersByAddress(ctx, chainID, address, toBlock, limit) if err != nil { - log.Error("[WalletAPI:: GetTransfersByAddress] can't fetch transfers", "err", err) + logutils.ZapLogger().Error("[WalletAPI:: GetTransfersByAddress] can't fetch transfers", zap.Error(err)) return nil, err } @@ -227,7 +228,7 @@ func (c *Controller) GetTransfersByAddress(ctx context.Context, chainID uint64, func (c *Controller) GetTransfersForIdentities(ctx context.Context, identities []TransactionIdentity) ([]View, error) { rst, err := c.db.GetTransfersForIdentities(ctx, identities) if err != nil { - log.Error("[transfer.Controller.GetTransfersForIdentities] DB err", err) + logutils.ZapLogger().Error("[transfer.Controller.GetTransfersForIdentities] DB err", zap.Error(err)) return nil, err } @@ -247,27 +248,27 @@ func (c *Controller) cleanUpRemovedAccount(address common.Address) { // Transfers will be deleted by foreign key constraint by cascade err := deleteBlocks(c.db.client, address) if err != nil { - log.Error("Failed to delete blocks", "error", err) + logutils.ZapLogger().Error("Failed to delete blocks", zap.Error(err)) } err = deleteAllRanges(c.db.client, address) if err != nil { - log.Error("Failed to delete old blocks ranges", "error", err) + 
logutils.ZapLogger().Error("Failed to delete old blocks ranges", zap.Error(err)) } err = c.blockRangesSeqDAO.deleteRange(address) if err != nil { - log.Error("Failed to delete blocks ranges sequential", "error", err) + logutils.ZapLogger().Error("Failed to delete blocks ranges sequential", zap.Error(err)) } err = c.transactionManager.removeMultiTransactionByAddress(address) if err != nil { - log.Error("Failed to delete multitransactions", "error", err) + logutils.ZapLogger().Error("Failed to delete multitransactions", zap.Error(err)) } rpcLimitsStorage := rpclimiter.NewLimitsDBStorage(c.db.client) err = rpcLimitsStorage.Delete(accountLimiterTag(address)) if err != nil { - log.Error("Failed to delete limits", "error", err) + logutils.ZapLogger().Error("Failed to delete limits", zap.Error(err)) } } @@ -275,7 +276,7 @@ func (c *Controller) cleanupAccountsLeftovers() error { // We clean up accounts that were deleted and soft removed accounts, err := c.accountsDB.GetWalletAddresses() if err != nil { - log.Error("Failed to get accounts", "error", err) + logutils.ZapLogger().Error("Failed to get accounts", zap.Error(err)) return err } @@ -286,7 +287,7 @@ func (c *Controller) cleanupAccountsLeftovers() error { addressesInWalletDB, err := getAddresses(c.db.client) if err != nil { - log.Error("Failed to get addresses from wallet db", "error", err) + logutils.ZapLogger().Error("Failed to get addresses from wallet db", zap.Error(err)) return err } diff --git a/services/wallet/transfer/database.go b/services/wallet/transfer/database.go index 5cd0a7da541..e6518e3b235 100644 --- a/services/wallet/transfer/database.go +++ b/services/wallet/transfer/database.go @@ -9,10 +9,12 @@ import ( "math/big" "reflect" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/bigint" w_common "github.com/status-im/status-go/services/wallet/common" "github.com/status-im/status-go/services/wallet/thirdparty" @@ -316,7 +318,7 @@ func insertBlocksWithTransactions(chainID uint64, creator statementCreator, head // Is that correct to set sender as account address? 
_, err = insertTx.Exec(chainID, header.Address, header.Address, transaction.ID, (*bigint.SQLBigInt)(header.Number), header.Hash, transaction.Type, &JSONBlob{transaction.Log}, logIndex, tokenID, txValue) if err != nil { - log.Error("error saving token transfer", "err", err) + logutils.ZapLogger().Error("error saving token transfer", zap.Error(err)) return err } } @@ -491,7 +493,13 @@ func updateOrInsertTransfersDBFields(creator statementCreator, transfers []trans t.receiptStatus, t.receiptType, t.txHash, t.logIndex, t.receiptBlockHash, t.cumulativeGasUsed, t.contractAddress, t.gasUsed, t.transactionIndex, t.txType, t.txProtected, t.txGas, txGasPrice, txGasTipCap, txGasFeeCap, txValue, t.txNonce, t.txSize, t.tokenAddress, (*bigint.SQLBigIntBytes)(t.tokenID), t.txFrom, t.txTo) if err != nil { - log.Error("can't save transfer", "b-hash", t.blockHash, "b-n", t.blockNumber, "a", t.address, "h", t.id) + logutils.ZapLogger().Error("can't save transfer", + zap.Stringer("b-hash", t.blockHash), + zap.Stringer("b-n", t.blockNumber), + zap.Stringer("a", t.address), + zap.Stringer("h", t.id), + zap.Error(err), + ) return err } } @@ -499,7 +507,13 @@ func updateOrInsertTransfersDBFields(creator statementCreator, transfers []trans for _, t := range transfers { err = removeGasOnlyEthTransfer(creator, t) if err != nil { - log.Error("can't remove gas only eth transfer", "b-hash", t.blockHash, "b-n", t.blockNumber, "a", t.address, "h", t.id, "err", err) + logutils.ZapLogger().Error("can't remove gas only eth transfer", + zap.Stringer("b-hash", t.blockHash), + zap.Stringer("b-n", t.blockNumber), + zap.Stringer("a", t.address), + zap.Stringer("h", t.id), + zap.Error(err), + ) // no return err, since it's not critical } } @@ -522,7 +536,7 @@ func removeGasOnlyEthTransfer(creator statementCreator, t transferDBFields) erro // If there's only one (or none), return without deleting if count <= 1 { - log.Debug("Only one or no transfer found with the same tx_hash, skipping deletion.") + logutils.ZapLogger().Debug("Only one or no transfer found with the same tx_hash, skipping deletion.") return nil } } @@ -540,7 +554,7 @@ func removeGasOnlyEthTransfer(creator statementCreator, t transferDBFields) erro if err != nil { return err } - log.Debug("removeGasOnlyEthTransfer rows deleted", "count", count) + logutils.ZapLogger().Debug("removeGasOnlyEthTransfer rows deleted", zap.Int64("count", count)) return nil } diff --git a/services/wallet/transfer/downloader.go b/services/wallet/transfer/downloader.go index f1bb49722b4..71fd37dc11c 100644 --- a/services/wallet/transfer/downloader.go +++ b/services/wallet/transfer/downloader.go @@ -6,14 +6,15 @@ import ( "math/big" "time" + "go.uber.org/zap" "golang.org/x/exp/slices" // since 1.21, this is in the standard library "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc/chain" w_common "github.com/status-im/status-go/services/wallet/common" ) @@ -202,7 +203,10 @@ func (d *ETHDownloader) getTransfersInBlock(ctx context.Context, blk *types.Bloc areSubTxsCheckedForTxHash := make(map[common.Hash]bool) - log.Debug("getTransfersInBlock", "block", blk.Number(), "transactionsToLoad", len(transactionsToLoad)) + logutils.ZapLogger().Debug("getTransfersInBlock", + zap.Stringer("block", blk.Number()), + zap.Int("transactionsToLoad", len(transactionsToLoad)), + ) for _, 
t := range transactionsToLoad { receipt, err := getReceipt(address, t.Log.TxHash) @@ -217,7 +221,7 @@ func (d *ETHDownloader) getTransfersInBlock(ctx context.Context, blk *types.Bloc subtransactions, err := d.subTransactionsFromPreloaded(t, tx, receipt, blk) if err != nil { - log.Error("can't fetch subTxs for erc20/erc721/erc1155 transfer", "error", err) + logutils.ZapLogger().Error("can't fetch subTxs for erc20/erc721/erc1155 transfer", zap.Error(err)) return nil, err } rst = append(rst, subtransactions...) @@ -230,14 +234,22 @@ func (d *ETHDownloader) getTransfersInBlock(ctx context.Context, blk *types.Bloc continue } if tx.ChainId().Cmp(big.NewInt(0)) != 0 && tx.ChainId().Cmp(d.chainClient.ToBigInt()) != 0 { - log.Info("chain id mismatch", "tx hash", tx.Hash(), "tx chain id", tx.ChainId(), "expected chain id", d.chainClient.NetworkID()) + logutils.ZapLogger().Info("chain id mismatch", + zap.Stringer("tx hash", tx.Hash()), + zap.Stringer("tx chain id", tx.ChainId()), + zap.Uint64("expected chain id", d.chainClient.NetworkID()), + ) continue } from, err := types.Sender(d.signer, tx) if err != nil { if err == core.ErrTxTypeNotSupported { - log.Error("Tx Type not supported", "tx chain id", tx.ChainId(), "type", tx.Type(), "error", err) + logutils.ZapLogger().Error("Tx Type not supported", + zap.Stringer("tx chain id", tx.ChainId()), + zap.Uint8("type", tx.Type()), + zap.Error(err), + ) continue } return nil, err @@ -266,7 +278,7 @@ func (d *ETHDownloader) getTransfersInBlock(ctx context.Context, blk *types.Bloc if !areSubTxsCheckedForTxHash[tx.Hash()] { subtransactions, err := d.subTransactionsFromTransactionData(address, from, tx, receipt, blk) if err != nil { - log.Error("can't fetch subTxs for eth transfer", "error", err) + logutils.ZapLogger().Error("can't fetch subTxs for eth transfer", zap.Error(err)) return nil, err } rst = append(rst, subtransactions...) 
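(Editorial aside: every hunk in this patch follows the same mechanical recipe — geth's variadic key/value logger is replaced by the shared zap logger, with one explicitly typed field per value. A minimal self-contained sketch of the mapping follows; zap.NewDevelopment() stands in for logutils.ZapLogger(), which the patch assumes is provided by status-go's logutils package, and the values are illustrative.)

package main

import (
	"errors"
	"math/big"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment() // stand-in for logutils.ZapLogger()
	defer func() { _ = logger.Sync() }()

	from, to := big.NewInt(100), big.NewInt(200)

	// Before: log.Debug("check range", "from", from, "to", to, "chain", 1, "error", err)
	// After: each value is wrapped in a typed zap field, so nothing falls
	// back to reflection-based formatting at the call site.
	logger.Debug("check range",
		zap.Stringer("from", from), // *big.Int implements fmt.Stringer
		zap.Stringer("to", to),
		zap.Uint64("chain", 1),
		zap.Error(errors.New("example error")),
	)
}
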
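(A related detail visible in the downloader hunks: values that used to be formatted eagerly, such as tx.Hash().Hex(), are now passed as zap.Stringer("txHash", tx.Hash()), and zap only calls String() when the entry survives the level check and is actually encoded. A small demonstration under that assumption; the slowHash type is invented for the example.)

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// slowHash pretends that hex-encoding is expensive, so we can observe
// when String() actually runs.
type slowHash struct{ b [32]byte }

func (h slowHash) String() string {
	fmt.Println("String() called") // visible side effect for the demo
	return fmt.Sprintf("%x", h.b)
}

func main() {
	// Raise the level to Info: Debug entries are dropped before encoding.
	logger := zap.NewExample(zap.IncreaseLevel(zapcore.InfoLevel))

	h := slowHash{}
	logger.Debug("dropped", zap.Stringer("txHash", h)) // String() never runs
	logger.Info("emitted", zap.Stringer("txHash", h))  // String() runs once
}
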
@@ -293,7 +305,11 @@ func (d *ETHDownloader) getTransfersInBlock(ctx context.Context, blk *types.Bloc } } } - log.Debug("getTransfersInBlock found", "block", blk.Number(), "len", len(rst), "time", time.Since(startTs)) + logutils.ZapLogger().Debug("getTransfersInBlock found", + zap.Stringer("block", blk.Number()), + zap.Int("len", len(rst)), + zap.Duration("time", time.Since(startTs)), + ) // TODO(dshulyak) test that balance difference was covered by transactions return rst, nil } @@ -377,7 +393,12 @@ func (d *ETHDownloader) fetchTransaction(parent context.Context, txHash common.H } func (d *ETHDownloader) subTransactionsFromPreloaded(preloadedTx *PreloadedTransaction, tx *types.Transaction, receipt *types.Receipt, blk *types.Block) ([]Transfer, error) { - log.Debug("subTransactionsFromPreloaded start", "txHash", tx.Hash().Hex(), "address", preloadedTx.Address, "tokenID", preloadedTx.TokenID, "value", preloadedTx.Value) + logutils.ZapLogger().Debug("subTransactionsFromPreloaded start", + zap.Stringer("txHash", tx.Hash()), + zap.Stringer("address", preloadedTx.Address), + zap.Stringer("tokenID", preloadedTx.TokenID), + zap.Stringer("value", preloadedTx.Value), + ) address := preloadedTx.Address txLog := preloadedTx.Log @@ -398,7 +419,15 @@ func (d *ETHDownloader) subTransactionsFromPreloaded(preloadedTx *PreloadedTrans case w_common.Erc20TransferEventType, w_common.Erc721TransferEventType, w_common.Erc1155TransferSingleEventType, w_common.Erc1155TransferBatchEventType: - log.Debug("subTransactionsFromPreloaded transfer", "eventType", eventType, "logIdx", txLog.Index, "txHash", tx.Hash().Hex(), "address", address.Hex(), "tokenID", preloadedTx.TokenID, "value", preloadedTx.Value, "baseFee", blk.BaseFee().String()) + logutils.ZapLogger().Debug("subTransactionsFromPreloaded transfer", + zap.String("eventType", string(eventType)), + zap.Uint("logIdx", txLog.Index), + zap.Stringer("txHash", tx.Hash()), + zap.Stringer("address", address), + zap.Stringer("tokenID", preloadedTx.TokenID), + zap.Stringer("value", preloadedTx.Value), + zap.Stringer("baseFee", blk.BaseFee()), + ) transfer := Transfer{ Type: w_common.EventTypeToSubtransactionType(eventType), @@ -422,12 +451,20 @@ func (d *ETHDownloader) subTransactionsFromPreloaded(preloadedTx *PreloadedTrans rst = append(rst, transfer) } - log.Debug("subTransactionsFromPreloaded end", "txHash", tx.Hash().Hex(), "address", address.Hex(), "tokenID", preloadedTx.TokenID, "value", preloadedTx.Value) + logutils.ZapLogger().Debug("subTransactionsFromPreloaded end", + zap.Stringer("txHash", tx.Hash()), + zap.Stringer("address", address), + zap.Stringer("tokenID", preloadedTx.TokenID), + zap.Stringer("value", preloadedTx.Value), + ) return rst, nil } func (d *ETHDownloader) subTransactionsFromTransactionData(address, from common.Address, tx *types.Transaction, receipt *types.Receipt, blk *types.Block) ([]Transfer, error) { - log.Debug("subTransactionsFromTransactionData start", "txHash", tx.Hash().Hex(), "address", address) + logutils.ZapLogger().Debug("subTransactionsFromTransactionData start", + zap.Stringer("txHash", tx.Hash()), + zap.Stringer("address", address), + ) rst := make([]Transfer, 0, 1) @@ -458,7 +495,10 @@ func (d *ETHDownloader) subTransactionsFromTransactionData(address, from common. 
} } - log.Debug("subTransactionsFromTransactionData end", "txHash", tx.Hash().Hex(), "address", address.Hex()) + logutils.ZapLogger().Debug("subTransactionsFromTransactionData end", + zap.Stringer("txHash", tx.Hash()), + zap.Stringer("address", address), + ) return rst, nil } @@ -475,7 +515,11 @@ func (d *ERC20TransfersDownloader) blocksFromLogs(parent context.Context, logs [ var address common.Address from, to, txIDs, tokenIDs, values, err := w_common.ParseTransferLog(l) if err != nil { - log.Error("failed to parse transfer log", "log", l, "address", d.accounts, "error", err) + logutils.ZapLogger().Error("failed to parse transfer log", + zap.Any("log", l), + zap.Stringers("address", d.accounts), + zap.Error(err), + ) continue } @@ -485,7 +529,10 @@ func (d *ERC20TransfersDownloader) blocksFromLogs(parent context.Context, logs [ } else if slices.Contains(d.accounts, to) { address = to } else { - log.Error("from/to address mismatch", "log", l, "addresses", d.accounts) + logutils.ZapLogger().Error("from/to address mismatch", + zap.Any("log", l), + zap.Stringers("addresses", d.accounts), + ) continue } @@ -493,7 +540,12 @@ func (d *ERC20TransfersDownloader) blocksFromLogs(parent context.Context, logs [ logType := w_common.EventTypeToSubtransactionType(eventType) for i, txID := range txIDs { - log.Debug("block from logs", "block", l.BlockNumber, "log", l, "logType", logType, "txID", txID) + logutils.ZapLogger().Debug("block from logs", + zap.Uint64("block", l.BlockNumber), + zap.Any("log", l), + zap.String("logType", string(logType)), + zap.Stringer("txID", txID), + ) // For ERC20 there is no tokenID, so we use nil var tokenID *big.Int @@ -533,11 +585,21 @@ func (d *ERC20TransfersDownloader) blocksFromLogs(parent context.Context, logs [ // time to get logs for 100000 blocks = 1.144686979s. with 249 events in the result set. 
func (d *ERC20TransfersDownloader) GetHeadersInRange(parent context.Context, from, to *big.Int) ([]*DBHeader, error) { start := time.Now() - log.Debug("get erc20 transfers in range start", "chainID", d.client.NetworkID(), "from", from, "to", to, "accounts", d.accounts) + logutils.ZapLogger().Debug("get erc20 transfers in range start", + zap.Uint64("chainID", d.client.NetworkID()), + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Stringers("accounts", d.accounts), + ) // TODO #16062: Figure out real root cause of invalid range if from != nil && to != nil && from.Cmp(to) > 0 { - log.Error("invalid range", "chainID", d.client.NetworkID(), "from", from, "to", to, "accounts", d.accounts) + logutils.ZapLogger().Error("invalid range", + zap.Uint64("chainID", d.client.NetworkID()), + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Stringers("accounts", d.accounts), + ) return nil, errors.New("invalid range") } @@ -586,7 +648,7 @@ func (d *ERC20TransfersDownloader) GetHeadersInRange(parent context.Context, fro logs := concatLogs(outbound, inboundOrMixed, inbound1155) if len(logs) == 0 { - log.Debug("no logs found for account") + logutils.ZapLogger().Debug("no logs found for account") return nil, nil } @@ -595,15 +657,31 @@ func (d *ERC20TransfersDownloader) GetHeadersInRange(parent context.Context, fro return nil, err } if len(rst) == 0 { - log.Warn("no headers found in logs for account", "chainID", d.client.NetworkID(), "addresses", d.accounts, "from", from, "to", to) + logutils.ZapLogger().Warn("no headers found in logs for account", + zap.Uint64("chainID", d.client.NetworkID()), + zap.Stringers("addresses", d.accounts), + zap.Stringer("from", from), + zap.Stringer("to", to), + ) } else { headers = append(headers, rst...) - log.Debug("found erc20 transfers for account", "chainID", d.client.NetworkID(), "addresses", d.accounts, - "from", from, "to", to, "headers", len(headers)) + logutils.ZapLogger().Debug("found erc20 transfers for account", + zap.Uint64("chainID", d.client.NetworkID()), + zap.Stringers("addresses", d.accounts), + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Int("headers", len(headers)), + ) } - log.Debug("get erc20 transfers in range end", "chainID", d.client.NetworkID(), - "from", from, "to", to, "headers", len(headers), "accounts", d.accounts, "took", time.Since(start)) + logutils.ZapLogger().Debug("get erc20 transfers in range end", + zap.Uint64("chainID", d.client.NetworkID()), + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Int("headers", len(headers)), + zap.Stringers("accounts", d.accounts), + zap.Duration("took", time.Since(start)), + ) return headers, nil } diff --git a/services/wallet/transfer/helpers.go b/services/wallet/transfer/helpers.go index 8733bea221d..9b6ff8ac174 100644 --- a/services/wallet/transfer/helpers.go +++ b/services/wallet/transfer/helpers.go @@ -8,12 +8,14 @@ import ( "math/big" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/account" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" wallet_common "github.com/status-im/status-go/services/wallet/common" "github.com/status-im/status-go/services/wallet/router/pathprocessor" ) @@ -206,7 +208,10 @@ func (tm *TransactionManager) removeMultiTransactionByAddress(address common.Add } counterpartyExists, err := 
tm.accountsDB.AddressExists(types.Address(addressToCheck)) if err != nil { - log.Error("Failed to query accounts db for a given address", "address", address, "error", err) + logutils.ZapLogger().Error("Failed to query accounts db for a given address", + zap.Stringer("address", address), + zap.Error(err), + ) continue } @@ -223,7 +228,7 @@ func (tm *TransactionManager) removeMultiTransactionByAddress(address common.Add for _, id := range ids { err = tm.storage.DeleteMultiTransaction(id) if err != nil { - log.Error("Failed to delete multi transaction", "id", id, "error", err) + logutils.ZapLogger().Error("Failed to delete multi transaction", zap.Int64("id", int64(id)), zap.Error(err)) } } } diff --git a/services/wallet/transfer/iterative.go b/services/wallet/transfer/iterative.go index 580a65a7be0..95af7b2f52e 100644 --- a/services/wallet/transfer/iterative.go +++ b/services/wallet/transfer/iterative.go @@ -5,7 +5,9 @@ import ( "errors" "math/big" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + + "github.com/status-im/status-go/logutils" ) // SetupIterativeDownloader configures IterativeDownloader with last known synced block. @@ -16,7 +18,12 @@ func SetupIterativeDownloader( return nil, errors.New("to or from cannot be nil") } - log.Debug("iterative downloader", "from", from, "to", to, "size", size) + logutils.ZapLogger().Debug("iterative downloader", + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Stringer("size", size), + ) + d := &IterativeDownloader{ client: client, batchSize: size, @@ -63,9 +70,17 @@ func (d *IterativeDownloader) Next(parent context.Context) ([]*DBHeader, *big.In from = d.from } headers, err := d.downloader.GetHeadersInRange(parent, from, to) - log.Debug("load erc20 transfers in range", "from", from, "to", to, "batchSize", d.batchSize) + logutils.ZapLogger().Debug("load erc20 transfers in range", + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Stringer("batchSize", d.batchSize), + ) if err != nil { - log.Error("failed to get transfer in between two blocks", "from", from, "to", to, "error", err) + logutils.ZapLogger().Error("failed to get transfer in between two blocks", + zap.Stringer("from", from), + zap.Stringer("to", to), + zap.Error(err), + ) return nil, nil, nil, err } diff --git a/services/wallet/transfer/sequential_fetch_strategy.go b/services/wallet/transfer/sequential_fetch_strategy.go index 7e99eca5132..44db61833dd 100644 --- a/services/wallet/transfer/sequential_fetch_strategy.go +++ b/services/wallet/transfer/sequential_fetch_strategy.go @@ -5,9 +5,11 @@ import ( "math/big" "sync" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/rpc/chain" "github.com/status-im/status-go/services/wallet/async" @@ -113,12 +115,15 @@ func (s *SequentialFetchStrategy) kind() FetchStrategyType { func (s *SequentialFetchStrategy) getTransfersByAddress(ctx context.Context, chainID uint64, address common.Address, toBlock *big.Int, limit int64) ([]Transfer, error) { - log.Debug("[WalletAPI:: GetTransfersByAddress] get transfers for an address", "address", address, - "chainID", chainID, "toBlock", toBlock, "limit", limit) + logutils.ZapLogger().Debug("[WalletAPI:: GetTransfersByAddress] get transfers for an address", + zap.Stringer("address", address), + zap.Uint64("chainID", chainID), + zap.Stringer("toBlock", 
toBlock), + zap.Int64("limit", limit)) rst, err := s.db.GetTransfersByAddress(chainID, address, toBlock, limit) if err != nil { - log.Error("[WalletAPI:: GetTransfersByAddress] can't fetch transfers", "err", err) + logutils.ZapLogger().Error("[WalletAPI:: GetTransfersByAddress] can't fetch transfers", zap.Error(err)) return nil, err } diff --git a/services/wallet/transfer/transaction_manager_multitransaction.go b/services/wallet/transfer/transaction_manager_multitransaction.go index a678d590afe..ce9468a65e9 100644 --- a/services/wallet/transfer/transaction_manager_multitransaction.go +++ b/services/wallet/transfer/transaction_manager_multitransaction.go @@ -7,11 +7,12 @@ import ( "time" "github.com/pkg/errors" + "go.uber.org/zap" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/account" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" wallet_common "github.com/status-im/status-go/services/wallet/common" "github.com/status-im/status-go/services/wallet/router/pathprocessor" "github.com/status-im/status-go/services/wallet/walletevent" @@ -123,7 +124,7 @@ func (tm *TransactionManager) ProceedWithTransactionsSignatures(ctx context.Cont _, err := tm.InsertMultiTransaction(tm.multiTransactionForKeycardSigning) if err != nil { - log.Error("failed to insert multi transaction", "err", err) + logutils.ZapLogger().Error("failed to insert multi transaction", zap.Error(err)) } return &MultiTransactionCommandResult{ @@ -184,7 +185,7 @@ func (tm *TransactionManager) WatchTransaction(ctx context.Context, chainID uint status, err := tm.pendingTracker.Watch(ctx, wallet_common.ChainID(chainID), transactionHash) if err == nil && *status != transactions.Pending { - log.Error("transaction is not pending", "status", status) + logutils.ZapLogger().Error("transaction is not pending", zap.String("status", *status)) return nil } diff --git a/services/wallet/walletconnect/database.go b/services/wallet/walletconnect/database.go index 2eda32ad855..c75fae09aad 100644 --- a/services/wallet/walletconnect/database.go +++ b/services/wallet/walletconnect/database.go @@ -4,7 +4,9 @@ import ( "database/sql" "fmt" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + + "github.com/status-im/status-go/logutils" ) type DBSession struct { @@ -33,7 +35,7 @@ func UpsertSession(db *sql.DB, data DBSession) error { if err != nil { rollErr := tx.Rollback() if rollErr != nil { - log.Error("error rolling back transaction", "rollErr", rollErr, "err", err) + logutils.ZapLogger().Error("error rolling back transaction", zap.NamedError("rollErr", rollErr), zap.Error(err)) } } }() diff --git a/services/wallet/walletconnect/walletconnect.go b/services/wallet/walletconnect/walletconnect.go index b2b7810a632..47ae06f77bf 100644 --- a/services/wallet/walletconnect/walletconnect.go +++ b/services/wallet/walletconnect/walletconnect.go @@ -11,11 +11,13 @@ import ( "strings" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" signercore "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/params" "github.com/status-im/status-go/services/typeddata" @@ -130,18 +132,24 @@ type Session struct { func (n *Namespace) Valid(namespaceName string, chainID *uint64) bool { if chainID == nil { if len(n.Chains) 
== 0 { - log.Warn("namespace doesn't refer to any chain") + logutils.ZapLogger().Warn("namespace doesn't refer to any chain") return false } for _, caip2Str := range n.Chains { resolvedNamespaceName, _, err := parseCaip2ChainID(caip2Str) if err != nil { - log.Warn("namespace chain not in caip2 format", "chain", caip2Str, "error", err) + logutils.ZapLogger().Warn("namespace chain not in caip2 format", + zap.String("chain", caip2Str), + zap.Error(err), + ) return false } if resolvedNamespaceName != namespaceName { - log.Warn("namespace name doesn't match", "namespace", namespaceName, "chain", caip2Str) + logutils.ZapLogger().Warn("namespace name doesn't match", + zap.String("namespace", namespaceName), + zap.String("chain", caip2Str), + ) return false } } @@ -156,7 +164,10 @@ func (p *Params) ValidateForProposal() bool { if strings.Contains(key, ":") { resolvedNamespaceName, cID, err := parseCaip2ChainID(key) if err != nil { - log.Warn("params validation failed CAIP-2", "str", key, "error", err) + logutils.ZapLogger().Warn("params validation failed CAIP-2", + zap.String("str", key), + zap.Error(err), + ) return false } key = resolvedNamespaceName @@ -164,7 +175,7 @@ func (p *Params) ValidateForProposal() bool { } if !isValidNamespaceName(key) { - log.Warn("invalid namespace name", "namespace", key) + logutils.ZapLogger().Warn("invalid namespace name", zap.String("namespace", key)) return false } @@ -235,7 +246,10 @@ func supportedChainsInSession(session Session) []uint64 { for _, caip2Str := range caipChains { _, chainID, err := parseCaip2ChainID(caip2Str) if err != nil { - log.Warn("Failed parsing CAIP-2", "str", caip2Str, "error", err) + logutils.ZapLogger().Warn("Failed parsing CAIP-2", + zap.String("str", caip2Str), + zap.Error(err), + ) continue } diff --git a/services/wallet/walletevent/transmitter.go b/services/wallet/walletevent/transmitter.go index 20da9ae6088..441e882511e 100644 --- a/services/wallet/walletevent/transmitter.go +++ b/services/wallet/walletevent/transmitter.go @@ -3,10 +3,12 @@ package walletevent import ( "sync" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" gocommon "github.com/status-im/status-go/common" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/signal" ) @@ -45,7 +47,7 @@ func (tmr *SignalsTransmitter) Start() error { // technically event.Feed cannot send an error to subscription.Err channel. // the only time we will get an event is when that channel is closed. 
if err != nil { - log.Error("wallet signals transmitter failed with", "error", err) + logutils.ZapLogger().Error("wallet signals transmitter failed with", zap.Error(err)) } return case event := <-events: diff --git a/services/wallet/walletevent/watcher.go b/services/wallet/walletevent/watcher.go index 9750c2ff76b..7fadbc645d6 100644 --- a/services/wallet/walletevent/watcher.go +++ b/services/wallet/walletevent/watcher.go @@ -3,8 +3,10 @@ package walletevent import ( "context" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/wallet/async" ) @@ -54,7 +56,7 @@ func watch(ctx context.Context, feed *event.Feed, callback EventCb) error { return nil case err := <-sub.Err(): if err != nil { - log.Error("wallet event watcher subscription failed", "error", err) + logutils.ZapLogger().Error("wallet event watcher subscription failed", zap.Error(err)) } case ev := <-ch: if callback != nil { diff --git a/services/web3provider/api.go b/services/web3provider/api.go index 9e192d5a8f3..929c531bcdb 100644 --- a/services/web3provider/api.go +++ b/services/web3provider/api.go @@ -4,11 +4,13 @@ import ( "encoding/json" "errors" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" signercore "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/status-im/status-go/account" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/services/typeddata" "github.com/status-im/status-go/transactions" ) @@ -237,18 +239,21 @@ func (api *API) web3AccResponse(request Web3SendAsyncReadOnlyRequest) (*Web3Send func (api *API) getVerifiedWalletAccount(address, password string) (*account.SelectedExtKey, error) { exists, err := api.s.accountsDB.AddressExists(types.HexToAddress(address)) if err != nil { - log.Error("failed to query db for a given address", "address", address, "error", err) + logutils.ZapLogger().Error("failed to query db for a given address", + zap.String("address", address), + zap.Error(err), + ) return nil, err } if !exists { - log.Error("failed to get a selected account", "err", transactions.ErrInvalidTxSender) + logutils.ZapLogger().Error("failed to get a selected account", zap.Error(transactions.ErrInvalidTxSender)) return nil, transactions.ErrAccountDoesntExist } key, err := api.s.accountsManager.VerifyAccountPassword(api.s.config.KeyStoreDir, address, password) if err != nil { - log.Error("failed to verify account", "account", address, "error", err) + logutils.ZapLogger().Error("failed to verify account", zap.String("account", address), zap.Error(err)) return nil, err } @@ -275,7 +280,7 @@ func (api *API) web3SignatureResponse(request Web3SendAsyncReadOnlyRequest) (*We } if err != nil { - log.Error("could not sign message", "err", err) + logutils.ZapLogger().Error("could not sign message", zap.Error(err)) return &Web3SendAsyncReadOnlyResponse{ ProviderResponse: ProviderResponse{ ResponseType: Web3SendAsyncCallback, @@ -328,7 +333,7 @@ func (api *API) ProcessWeb3ReadOnlyRequest(request Web3SendAsyncReadOnlyRequest) hash, err := api.sendTransaction(request.Payload.ChainID, trxArgs, request.Payload.Password, Web3SendAsyncReadOnly) if err != nil { - log.Error("could not send transaction message", "err", err) + logutils.ZapLogger().Error("could not send transaction message", zap.Error(err)) return &Web3SendAsyncReadOnlyResponse{ 
ProviderResponse: ProviderResponse{ ResponseType: Web3SendAsyncCallback, diff --git a/services/web3provider/api_test.go b/services/web3provider/api_test.go index fe815a2121d..f78279f335a 100644 --- a/services/web3provider/api_test.go +++ b/services/web3provider/api_test.go @@ -14,6 +14,7 @@ import ( "github.com/status-im/status-go/multiaccounts/accounts" "github.com/status-im/status-go/multiaccounts/settings" "github.com/status-im/status-go/params" + "github.com/status-im/status-go/protocol/tt" "github.com/status-im/status-go/services/permissions" "github.com/status-im/status-go/t/helpers" "github.com/status-im/status-go/t/utils" @@ -54,7 +55,7 @@ func setupTestAPI(t *testing.T) (*API, func()) { utils.Init() require.NoError(t, utils.ImportTestAccount(keyStoreDir, utils.GetAccount1PKFile())) - accManager := account.NewGethManager() + accManager := account.NewGethManager(tt.MustCreateTestLogger()) nodeConfig := &params.NodeConfig{ KeyStoreDir: keyStoreDir, diff --git a/signal/signals.go b/signal/signals.go index c6f37c04914..30c3ac899ca 100644 --- a/signal/signals.go +++ b/signal/signals.go @@ -14,7 +14,9 @@ import ( "sync" - "github.com/ethereum/go-ethereum/log" + "go.uber.org/zap" + + "github.com/status-im/status-go/logutils" ) // MobileSignalHandler is a simple callback function that gets called when any signal is received @@ -24,7 +26,7 @@ type MobileSignalHandler func([]byte) var mobileSignalHandler MobileSignalHandler // All general log messages in this package should be routed through this logger. -var logger = log.New("package", "status-go/signal") +var logger = logutils.ZapLogger().Named("signal") // Envelope is a general signal sent upward from node to RN app type Envelope struct { @@ -45,7 +47,7 @@ func send(typ string, event interface{}) { signal := NewEnvelope(typ, event) data, err := json.Marshal(&signal) if err != nil { - logger.Error("Marshalling signal envelope", "error", err) + logger.Error("Marshalling signal envelope", zap.Error(err)) return } // If a Go implementation of signal handler is set, let's use it.
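Every file in this diff follows the same mechanical recipe: drop `github.com/ethereum/go-ethereum/log`, import `go.uber.org/zap` together with `github.com/status-im/status-go/logutils`, and turn the old logger's untyped key-value pairs into typed zap fields (package-level loggers additionally get a `.Named(...)` scope, as in `signal/signals.go` above). A minimal sketch of the before/after shape; `fetchTransfers` and `doFetch` are hypothetical stand-ins, not code from this PR:

```go
package example

import (
	"go.uber.org/zap"

	"github.com/status-im/status-go/logutils"
)

// fetchTransfers is a hypothetical call site illustrating the migration pattern.
func fetchTransfers(chainID uint64) error {
	if err := doFetch(chainID); err != nil {
		// Before: log.Error("can't fetch transfers", "chainID", chainID, "err", err)
		// After: typed fields on the shared logger; zap.Error attaches err under the "error" key.
		logutils.ZapLogger().Error("can't fetch transfers",
			zap.Uint64("chainID", chainID),
			zap.Error(err),
		)
		return err
	}
	return nil
}

// doFetch is a stub so the sketch is self-contained.
func doFetch(chainID uint64) error { return nil }
```

The typed fields are what make zap's structured output machine-parseable; the old geth logger left both the key names and the error key to each call site.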
@@ -84,7 +86,7 @@ func ResetDefaultNodeNotificationHandler() { // TriggerDefaultNodeNotificationHandler triggers default notification handler (helpful in tests) func TriggerDefaultNodeNotificationHandler(jsonEvent string) { - logger.Trace("Notification received", "event", jsonEvent) + logger.Debug("Notification received", zap.String("event", jsonEvent)) } // nolint: golint diff --git a/telemetry/client.go b/telemetry/client.go index 5c21177089f..276ff6588d2 100644 --- a/telemetry/client.go +++ b/telemetry/client.go @@ -18,28 +18,49 @@ import ( "github.com/status-im/status-go/wakuv2" wps "github.com/waku-org/go-waku/waku/v2/peerstore" - v2protocol "github.com/waku-org/go-waku/waku/v2/protocol" v1protocol "github.com/status-im/status-go/protocol/v1" + v2common "github.com/status-im/status-go/wakuv2/common" + v2protocol "github.com/waku-org/go-waku/waku/v2/protocol" ) type TelemetryType string const ( - ProtocolStatsMetric TelemetryType = "ProtocolStats" - SentEnvelopeMetric TelemetryType = "SentEnvelope" - UpdateEnvelopeMetric TelemetryType = "UpdateEnvelope" - ReceivedMessagesMetric TelemetryType = "ReceivedMessages" + // Bandwidth as reported by libp2p + ProtocolStatsMetric TelemetryType = "ProtocolStats" + // Envelopes sent by this node + SentEnvelopeMetric TelemetryType = "SentEnvelope" + // Change in status of a sent envelope (usually processing errors) + UpdateEnvelopeMetric TelemetryType = "UpdateEnvelope" + // Messages received by this node + ReceivedMessagesMetric TelemetryType = "ReceivedMessages" + // Errors encountered when sending envelopes ErrorSendingEnvelopeMetric TelemetryType = "ErrorSendingEnvelope" - PeerCountMetric TelemetryType = "PeerCount" - PeerConnFailuresMetric TelemetryType = "PeerConnFailure" - MessageCheckSuccessMetric TelemetryType = "MessageCheckSuccess" - MessageCheckFailureMetric TelemetryType = "MessageCheckFailure" - PeerCountByShardMetric TelemetryType = "PeerCountByShard" - PeerCountByOriginMetric TelemetryType = "PeerCountByOrigin" - MaxRetryCache = 5000 + // Total connections for this node at a given time + PeerCountMetric TelemetryType = "PeerCount" + // Number of failed peer connections for this node at a given time + PeerConnFailuresMetric TelemetryType = "PeerConnFailure" + // Store confirmation for a sent message successful + MessageCheckSuccessMetric TelemetryType = "MessageCheckSuccess" + // Store confirmation for a sent message failed + MessageCheckFailureMetric TelemetryType = "MessageCheckFailure" + // Total connections for this node per shard at a given time + PeerCountByShardMetric TelemetryType = "PeerCountByShard" + // Total connections for this node per discovery origin at a given time + PeerCountByOriginMetric TelemetryType = "PeerCountByOrigin" + // Error encountered when attempting to dial a peer + DialFailureMetric TelemetryType = "DialFailure" + // Missed message as detected by periodic store query + MissedMessageMetric TelemetryType = "MissedMessages" + // Missed message with a relevant filter + MissedRelevantMessageMetric TelemetryType = "MissedRelevantMessages" + // MVDS ack received for a sent message + MessageDeliveryConfirmedMetric TelemetryType = "MessageDeliveryConfirmed" ) +const MaxRetryCache = 5000 + type TelemetryRequest struct { Id int `json:"id"` TelemetryType TelemetryType `json:"telemetry_type"` @@ -103,6 +124,26 @@ func (c *Client) PushPeerCountByOrigin(ctx context.Context, peerCountByOrigin ma } } +func (c *Client) PushDialFailure(ctx context.Context, dialFailure v2common.DialError) { + var errorMessage string = "" 
+ if dialFailure.ErrType == v2common.ErrorUnknown { + errorMessage = dialFailure.ErrMsg + } + c.processAndPushTelemetry(ctx, DialFailure{ErrorType: dialFailure.ErrType, ErrorMsg: errorMessage, Protocols: dialFailure.Protocols}) +} + +func (c *Client) PushMissedMessage(ctx context.Context, envelope *v2protocol.Envelope) { + c.processAndPushTelemetry(ctx, MissedMessage{Envelope: envelope}) +} + +func (c *Client) PushMissedRelevantMessage(ctx context.Context, receivedMessage *v2common.ReceivedMessage) { + c.processAndPushTelemetry(ctx, MissedRelevantMessage{ReceivedMessage: receivedMessage}) +} + +func (c *Client) PushMessageDeliveryConfirmed(ctx context.Context, messageHash string) { + c.processAndPushTelemetry(ctx, MessageDeliveryConfirmed{MessageHash: messageHash}) +} + type ReceivedMessages struct { Filter transport.Filter SSHMessage *types.Message @@ -136,6 +177,24 @@ type PeerCountByOrigin struct { Count uint } +type DialFailure struct { + ErrorType v2common.DialErrorType + ErrorMsg string + Protocols string +} + +type MissedMessage struct { + Envelope *v2protocol.Envelope +} + +type MissedRelevantMessage struct { + ReceivedMessage *v2common.ReceivedMessage +} + +type MessageDeliveryConfirmed struct { + MessageHash string +} + type Client struct { serverURL string httpClient *http.Client @@ -308,6 +367,30 @@ func (c *Client) processAndPushTelemetry(ctx context.Context, data interface{}) TelemetryType: PeerCountByOriginMetric, TelemetryData: c.ProcessPeerCountByOrigin(v), } + case DialFailure: + telemetryRequest = TelemetryRequest{ + Id: c.nextId, + TelemetryType: DialFailureMetric, + TelemetryData: c.ProcessDialFailure(v), + } + case MissedMessage: + telemetryRequest = TelemetryRequest{ + Id: c.nextId, + TelemetryType: MissedMessageMetric, + TelemetryData: c.ProcessMissedMessage(v), + } + case MissedRelevantMessage: + telemetryRequest = TelemetryRequest{ + Id: c.nextId, + TelemetryType: MissedRelevantMessageMetric, + TelemetryData: c.ProcessMissedRelevantMessage(v), + } + case MessageDeliveryConfirmed: + telemetryRequest = TelemetryRequest{ + Id: c.nextId, + TelemetryType: MessageDeliveryConfirmedMetric, + TelemetryData: c.ProcessMessageDeliveryConfirmed(v), + } default: c.logger.Error("Unknown telemetry data type") return @@ -396,9 +479,7 @@ func (c *Client) ProcessSentEnvelope(sentEnvelope wakuv2.SentEnvelope) *json.Raw postBody["topic"] = sentEnvelope.Envelope.Message().ContentTopic postBody["senderKeyUID"] = c.keyUID postBody["publishMethod"] = sentEnvelope.PublishMethod.String() - body, _ := json.Marshal(postBody) - jsonRawMessage := json.RawMessage(body) - return &jsonRawMessage + return c.marshalPostBody(postBody) } func (c *Client) ProcessErrorSendingEnvelope(errorSendingEnvelope wakuv2.ErrorSendingEnvelope) *json.RawMessage { @@ -410,17 +491,13 @@ func (c *Client) ProcessErrorSendingEnvelope(errorSendingEnvelope wakuv2.ErrorSe postBody["senderKeyUID"] = c.keyUID postBody["publishMethod"] = errorSendingEnvelope.SentEnvelope.PublishMethod.String() postBody["error"] = errorSendingEnvelope.Error.Error() - body, _ := json.Marshal(postBody) - jsonRawMessage := json.RawMessage(body) - return &jsonRawMessage + return c.marshalPostBody(postBody) } func (c *Client) ProcessPeerCount(peerCount PeerCount) *json.RawMessage { postBody := c.commonPostBody() postBody["peerCount"] = peerCount.PeerCount - body, _ := json.Marshal(postBody) - jsonRawMessage := json.RawMessage(body) - return &jsonRawMessage + return c.marshalPostBody(postBody) } func (c *Client) 
ProcessPeerConnFailure(peerConnFailure PeerConnFailure) *json.RawMessage { @@ -428,41 +505,74 @@ func (c *Client) ProcessPeerConnFailure(peerConnFailure PeerConnFailure) *json.R postBody["failedPeerId"] = peerConnFailure.FailedPeerId postBody["failureCount"] = peerConnFailure.FailureCount postBody["nodeKeyUID"] = c.keyUID - body, _ := json.Marshal(postBody) - jsonRawMessage := json.RawMessage(body) - return &jsonRawMessage + return c.marshalPostBody(postBody) } func (c *Client) ProcessMessageCheckSuccess(messageCheckSuccess MessageCheckSuccess) *json.RawMessage { postBody := c.commonPostBody() postBody["messageHash"] = messageCheckSuccess.MessageHash - body, _ := json.Marshal(postBody) - jsonRawMessage := json.RawMessage(body) - return &jsonRawMessage + return c.marshalPostBody(postBody) } func (c *Client) ProcessPeerCountByShard(peerCountByShard PeerCountByShard) *json.RawMessage { postBody := c.commonPostBody() postBody["shard"] = peerCountByShard.Shard postBody["count"] = peerCountByShard.Count - body, _ := json.Marshal(postBody) - jsonRawMessage := json.RawMessage(body) - return &jsonRawMessage + return c.marshalPostBody(postBody) } func (c *Client) ProcessMessageCheckFailure(messageCheckFailure MessageCheckFailure) *json.RawMessage { postBody := c.commonPostBody() postBody["messageHash"] = messageCheckFailure.MessageHash - body, _ := json.Marshal(postBody) - jsonRawMessage := json.RawMessage(body) - return &jsonRawMessage + return c.marshalPostBody(postBody) } func (c *Client) ProcessPeerCountByOrigin(peerCountByOrigin PeerCountByOrigin) *json.RawMessage { postBody := c.commonPostBody() postBody["origin"] = peerCountByOrigin.Origin postBody["count"] = peerCountByOrigin.Count - body, _ := json.Marshal(postBody) + return c.marshalPostBody(postBody) +} + +func (c *Client) ProcessDialFailure(dialFailure DialFailure) *json.RawMessage { + postBody := c.commonPostBody() + postBody["errorType"] = dialFailure.ErrorType + postBody["errorMsg"] = dialFailure.ErrorMsg + postBody["protocols"] = dialFailure.Protocols + return c.marshalPostBody(postBody) +} + +func (c *Client) ProcessMissedMessage(missedMessage MissedMessage) *json.RawMessage { + postBody := c.commonPostBody() + postBody["messageHash"] = missedMessage.Envelope.Hash().String() + postBody["sentAt"] = uint32(missedMessage.Envelope.Message().GetTimestamp() / int64(time.Second)) + postBody["pubsubTopic"] = missedMessage.Envelope.PubsubTopic() + postBody["contentTopic"] = missedMessage.Envelope.Message().ContentTopic + return c.marshalPostBody(postBody) +} + +func (c *Client) ProcessMissedRelevantMessage(missedMessage MissedRelevantMessage) *json.RawMessage { + postBody := c.commonPostBody() + postBody["messageHash"] = missedMessage.ReceivedMessage.Envelope.Hash().String() + postBody["sentAt"] = missedMessage.ReceivedMessage.Sent + postBody["pubsubTopic"] = missedMessage.ReceivedMessage.PubsubTopic + postBody["contentTopic"] = missedMessage.ReceivedMessage.ContentTopic + return c.marshalPostBody(postBody) +} + +func (c *Client) ProcessMessageDeliveryConfirmed(messageDeliveryConfirmed MessageDeliveryConfirmed) *json.RawMessage { + postBody := c.commonPostBody() + postBody["messageHash"] = messageDeliveryConfirmed.MessageHash + return c.marshalPostBody(postBody) +} + +// Helper function to marshal post body and handle errors +func (c *Client) marshalPostBody(postBody map[string]interface{}) *json.RawMessage { + body, err := json.Marshal(postBody) + if err != nil { + c.logger.Error("Error marshaling post body", zap.Error(err)) + return nil + 
} jsonRawMessage := json.RawMessage(body) return &jsonRawMessage } diff --git a/telemetry/client_test.go b/telemetry/client_test.go index a19a9569cc9..a549da455d7 100644 --- a/telemetry/client_test.go +++ b/telemetry/client_test.go @@ -26,6 +26,7 @@ import ( "github.com/status-im/status-go/protocol/tt" v1protocol "github.com/status-im/status-go/protocol/v1" "github.com/status-im/status-go/wakuv2" + "github.com/status-im/status-go/wakuv2/common" ) var ( @@ -288,22 +289,37 @@ func TestRetryCacheCleanup(t *testing.T) { ctx := context.Background() client := createClient(t, "") - client.Start(ctx) for i := 0; i < 6000; i++ { - sendEnvelope(ctx, client) + go sendEnvelope(ctx, client) + telemetryRequest := <-client.telemetryCh + client.telemetryCache = append(client.telemetryCache, telemetryRequest) } - time.Sleep(110 * time.Millisecond) + err := client.pushTelemetryRequest(client.telemetryCache) + // For this test case an error when pushing to the server is fine + require.Error(t, err) + client.telemetryCache = nil require.Equal(t, 6000, len(client.telemetryRetryCache)) - sendEnvelope(ctx, client) + go sendEnvelope(ctx, client) + telemetryRequest := <-client.telemetryCh + client.telemetryCache = append(client.telemetryCache, telemetryRequest) + + err = client.pushTelemetryRequest(client.telemetryCache) + require.Error(t, err) - time.Sleep(210 * time.Millisecond) + telemetryRequests := make([]TelemetryRequest, len(client.telemetryCache)) + copy(telemetryRequests, client.telemetryCache) + client.telemetryCache = nil + + err = client.pushTelemetryRequest(telemetryRequests) + require.Error(t, err) require.Equal(t, 5001, len(client.telemetryRetryCache)) } + func setDefaultConfig(config *wakuv2.Config, lightMode bool) { config.ClusterID = 16 @@ -453,3 +469,131 @@ func TestPeerCountByOrigin(t *testing.T) { require.NotEqual(t, 0, len(w.Peers())) }) } + +type testCase struct { + name string + input interface{} + expectedType TelemetryType + expectedFields map[string]interface{} +} + +func runTestCase(t *testing.T, tc testCase) { + ctx := context.Background() + client := createClient(t, "") + + go client.processAndPushTelemetry(ctx, tc.input) + + telemetryRequest := <-client.telemetryCh + + require.Equal(t, tc.expectedType, telemetryRequest.TelemetryType, "Unexpected telemetry type") + + var telemetryData map[string]interface{} + err := json.Unmarshal(*telemetryRequest.TelemetryData, &telemetryData) + require.NoError(t, err, "Failed to unmarshal telemetry data") + + for key, value := range tc.expectedFields { + require.Equal(t, value, telemetryData[key], "Unexpected value for %s", key) + } + + require.Contains(t, telemetryData, "nodeName", "Missing nodeName in telemetry data") + require.Contains(t, telemetryData, "peerId", "Missing peerId in telemetry data") + require.Contains(t, telemetryData, "statusVersion", "Missing statusVersion in telemetry data") + require.Contains(t, telemetryData, "deviceType", "Missing deviceType in telemetry data") + require.Contains(t, telemetryData, "timestamp", "Missing timestamp in telemetry data") + + // Simulate pushing the telemetry request + client.telemetryCache = append(client.telemetryCache, telemetryRequest) + + err = client.pushTelemetryRequest(client.telemetryCache) + // For this test case, we expect an error when pushing to the server + require.Error(t, err) + + // Verify that the request is now in the retry cache + require.Equal(t, 1, len(client.telemetryRetryCache), "Expected one item in telemetry retry cache") +} + +func TestProcessMessageDeliveryConfirmed(t 
*testing.T) { + tc := testCase{ + name: "MessageDeliveryConfirmed", + input: MessageDeliveryConfirmed{ + MessageHash: "0x1234567890abcdef", + }, + expectedType: MessageDeliveryConfirmedMetric, + expectedFields: map[string]interface{}{ + "messageHash": "0x1234567890abcdef", + }, + } + runTestCase(t, tc) +} + +func TestProcessMissedRelevantMessage(t *testing.T) { + now := time.Now() + message := common.NewReceivedMessage( + v2protocol.NewEnvelope( + &pb.WakuMessage{ + Payload: []byte{1, 2, 3, 4, 5}, + ContentTopic: testContentTopic, + Version: proto.Uint32(0), + Timestamp: proto.Int64(now.Unix()), + }, 0, ""), + common.MissingMessageType, + ) + tc := testCase{ + name: "MissedRelevantMessage", + input: MissedRelevantMessage{ + ReceivedMessage: message, + }, + expectedType: MissedRelevantMessageMetric, + expectedFields: map[string]interface{}{ + "messageHash": message.Envelope.Hash().String(), + "pubsubTopic": "", + "contentTopic": "0x12345679", + }, + } + runTestCase(t, tc) +} + +func TestProcessMissedMessage(t *testing.T) { + now := time.Now() + message := common.NewReceivedMessage( + v2protocol.NewEnvelope( + &pb.WakuMessage{ + Payload: []byte{1, 2, 3, 4, 5}, + ContentTopic: testContentTopic, + Version: proto.Uint32(0), + Timestamp: proto.Int64(now.Unix()), + }, 0, ""), + common.MissingMessageType, + ) + tc := testCase{ + name: "MissedMessage", + input: MissedMessage{ + Envelope: message.Envelope, + }, + expectedType: MissedMessageMetric, + expectedFields: map[string]interface{}{ + "messageHash": message.Envelope.Hash().String(), + "pubsubTopic": "", + "contentTopic": message.Envelope.Message().ContentTopic, + }, + } + runTestCase(t, tc) +} + +func TestProcessDialFailure(t *testing.T) { + tc := testCase{ + name: "DialFailure", + input: DialFailure{ + ErrorType: common.ErrorUnknown, + ErrorMsg: "test error message", + Protocols: "test-protocols", + }, + expectedType: DialFailureMetric, + expectedFields: map[string]interface{}{ + "errorType": float64(common.ErrorUnknown), + "errorMsg": "test error message", + "protocols": "test-protocols", + }, + } + runTestCase(t, tc) +} diff --git a/tests-functional/.gitignore b/tests-functional/.gitignore new file mode 100644 index 00000000000..6a45c55fe60 --- /dev/null +++ b/tests-functional/.gitignore @@ -0,0 +1,2 @@ +.idea/ +.local/ diff --git a/tests-functional/README.MD b/tests-functional/README.MD index 16ceefa9c0f..d30e44e00af 100644 --- a/tests-functional/README.MD +++ b/tests-functional/README.MD @@ -1,6 +1,10 @@ ## Overview -Functional tests for status-go +Functional tests for `status-go` ## Table of Contents @@ -8,34 +12,92 @@ Functional tests for status-go - [How to Install](#how-to-install) - [How to Run](#how-to-run) - [Running Tests](#running-tests) -- [Implementation details](#implementation-details) +- [Implementation Details](#implementation-details) +- [Build Status Backend](#build-status-backend) ## How to Install -* Install [Docker](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) -* Install [Python 3.10.14](https://www.python.org/downloads/) -* In `./tests-functional`, run `pip install -r requirements.txt` -* **Optional (for test development)**: Use Python virtual environment for better dependency management. You can follow the guide [here](https://akrabat.com/creating-virtual-environments-with-pyenv/): +1.
Install [Docker](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) +2. Install [Python 3.10.14](https://www.python.org/downloads/) +3. **Set up a virtual environment (recommended):** + - In `./tests-functional`, run: + ```bash + python -m venv .venv + source .venv/bin/activate + pip install -r requirements.txt + ``` + - **Optional (for test development)**: To manage Python versions and virtual environments with `pyenv` instead, follow the guide [here](https://akrabat.com/creating-virtual-environments-with-pyenv/) +4. Install pre-commit hooks (optional): + ```bash + pre-commit install + ``` ## How to Run -### Running dev RPC (anvil with contracts) -- In `./tests-functional` run `docker compose -f docker-compose.anvil.yml up --remove-orphans --build`, as result: - * an [anvil](https://book.getfoundry.sh/reference/anvil/) container with ChainID 31337 exposed on `0.0.0.0:8545` will start running - * Status-im contracts will be deployed to the network +### Running dev RPC (Anvil with contracts) + +In `./tests-functional`: +```bash +docker compose -f docker-compose.anvil.yml up --remove-orphans --build +``` + +This command will: +- Start an [Anvil](https://book.getfoundry.sh/reference/anvil/) container with ChainID `31337`, exposed on `0.0.0.0:8545` +- Deploy Status-im contracts to the Anvil network + +### Running Tests + +To run the tests: + +1. In `./tests-functional`, start the testing containers: + ```bash + docker compose -f docker-compose.anvil.yml -f docker-compose.test.status-go.yml -f docker-compose.status-go.local.yml up --build --remove-orphans + ``` + + This command will: + - Create a container with [status-go as daemon](https://github.com/status-im/status-go/issues/5175), exposing `APIModules` on `0.0.0.0:3333` + - Configure `status-go` to use [Anvil](https://book.getfoundry.sh/reference/anvil/) as the `RPCURL` with ChainID `31337` + - Deploy all Status-im contracts to the Anvil network + +2. To execute tests: + - Run all tests: + ```bash + pytest + ``` + - Run tests marked as `wallet`: + ```bash + pytest -m wallet + ``` + - Run a specific test: + ```bash + pytest -k "test_contact_request_baseline" + ``` + +## Implementation Details + +- Functional tests are implemented in `./tests-functional/tests` using [pytest](https://docs.pytest.org/en/8.2.x/). +- Each test performs two types of verifications: + - **`verify_is_valid_json_rpc_response()`**: Checks for a status code `200`, a non-empty response, JSON-RPC structure, presence of the `result` field, and the expected ID. + - **`jsonschema.validate()`**: Validates that the response contains expected data, including required fields and types. Schemas are stored in `/schemas/wallet_MethodName`. + +- **Schema Generation**: + - New schemas can be generated with `./tests-functional/schema_builder.py` by passing a response to the `CustomSchemaBuilder(schema_name).create_schema(response.json())` method. This should be used only during test creation. + - Search `how to create schema:` in test files for examples; a sketch of a typical test follows below.
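To make those two verification layers concrete, here is a minimal sketch of a functional test under the conventions above. It assumes the `init_status_backend` fixture from `conftest.py` and the `wallet_getEthereumChains` schema shipped in `/schemas`; the method under test is illustrative:

```python
import pytest


@pytest.mark.wallet
def test_get_ethereum_chains_schema(init_status_backend):
    # Fixture yields a logged-in StatusBackend client (see conftest.py).
    backend = init_status_backend

    # Layer 1: send the JSON-RPC call; rpc_valid_request() runs
    # verify_is_valid_json_rpc_response() internally (status 200,
    # non-empty body, "result" present, matching id).
    response = backend.rpc_valid_request("wallet_getEthereumChains", _id=99)

    # Layer 2: validate the parsed payload against the stored draft-07 schema.
    backend.verify_json_schema(response.json(), "wallet_getEthereumChains")
```

Note that after this diff, `verify_json_schema()` expects the already-parsed JSON, so the caller passes `response.json()` rather than the raw response object.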
+ +## Build Status Backend + +You can build the binary with the following command in the `status-go` root directory: + +```bash +make status-backend +``` -### Run tests -- In `./tests-functional` run `docker compose -f docker-compose.anvil.yml -f docker-compose.test.status-go.yml -f docker-compose.status-go.local.yml up --build --remove-orphans`, as result: - * a container with [status-go as daemon](https://github.com/status-im/status-go/issues/5175) will be created with APIModules exposed on `0.0.0.0:3333` - * status-go will use [anvil](https://book.getfoundry.sh/reference/anvil/) as RPCURL with ChainID 31337 - * all Status-im contracts will be deployed to the network +For further details on building and setting up `status-go` and `status-backend`, refer to the official documentation: +- [status-backend README](https://github.com/status-im/status-go/blob/develop/cmd/status-backend/README.md) +- [status-go cmd directory](https://github.com/status-im/status-go/tree/develop/cmd/status-backend) -* In `./tests-functional/tests` directory run `pytest -m wallet` +Location of the binary: `cmd/status-backend/status-backend` -## Implementation details - -- Functional tests are implemented in `./tests-functional/tests` based on [pytest](https://docs.pytest.org/en/8.2.x/) -- Every test has two types of verifications: - - `verify_is_valid_json_rpc_response()` checks for status code 200, non-empty response, JSON-RPC structure, presence of the `result` field, and expected ID. - - `jsonschema.validate()` is used to check that the response contains expected data, including required fields and types. Schemas are stored in `/schemas/wallet_MethodName` - - New schemas can be generated using `./tests-functional/schema_builder.py` by passing a response to the `CustomSchemaBuilder(schema_name).create_schema(response.json())` method, should be used only on test creation phase, please search `how to create schema:` to see an example in a test \ No newline at end of file
diff --git a/tests-functional/clients/signals.py b/tests-functional/clients/signals.py index 28b670af413..192c5056175 100644 --- a/tests-functional/clients/signals.py +++ b/tests-functional/clients/signals.py @@ -9,40 +9,60 @@ class SignalClient: def __init__(self, ws_url, await_signals): self.url = f"{ws_url}/signals" self.await_signals = await_signals - self.received_signals = { - signal: [] for signal in self.await_signals - } + self.received_signals = {signal: [] for signal in self.await_signals} def on_message(self, ws, signal): - signal = json.loads(signal) - if signal.get("type") in self.await_signals: - self.received_signals[signal["type"]].append(signal) + logger = logging.getLogger(__name__) + + signal_data = json.loads(signal) + signal_type = signal_data.get("type") + + logger.info(f"Received signal: {signal_data}") - def wait_for_signal(self, signal_type, timeout=20): + if signal_type in self.await_signals: + self.received_signals[signal_type].append(signal_data) + + def wait_for_signal(self, signal_type, expected_event=None, timeout=20): + logger = logging.getLogger(__name__) start_time = time.time() - while not self.received_signals.get(signal_type): - if time.time() - start_time >= timeout: - raise TimeoutError( - f"Signal {signal_type} is not received in {timeout} seconds") + while time.time() - start_time < timeout: + if self.received_signals.get(signal_type): + received_signal = self.received_signals[signal_type][0] + if expected_event: + event = received_signal.get("event", {}) + if all(event.get(k) == v for k, v in expected_event.items()): + logger.info(f"Signal {signal_type} with event {expected_event} received and matched.") + return received_signal + else: + logger.debug( + f"Signal {signal_type} received but event did not match expected event: {expected_event}.
Received event: {event}") + else: + logger.info(f"Signal {signal_type} received without specific event validation.") + return received_signal time.sleep(0.2) - logging.debug(f"Signal {signal_type} is received in {round(time.time() - start_time)} seconds") - return self.received_signals[signal_type][0] + + raise TimeoutError(f"Signal {signal_type} with event {expected_event} not received in {timeout} seconds") def _on_error(self, ws, error): - logging.error(f"Error: {error}") + logger = logging.getLogger(__name__) + logger.error(f"WebSocket error: {error}") def _on_close(self, ws, close_status_code, close_msg): - logging.info(f"Connection closed: {close_status_code}, {close_msg}") + logger = logging.getLogger(__name__) + logger.info(f"WebSocket connection closed: {close_status_code}, {close_msg}") def _on_open(self, ws): - logging.info("Connection opened") + logger = logging.getLogger(__name__) + logger.info("WebSocket connection opened") def _connect(self): - ws = websocket.WebSocketApp(self.url, - on_message=self.on_message, - on_error=self._on_error, - on_close=self._on_close) + ws = websocket.WebSocketApp( + self.url, + on_message=self.on_message, + on_error=self._on_error, + on_close=self._on_close + ) ws.on_open = self._on_open ws.run_forever() diff --git a/tests-functional/clients/status_backend.py b/tests-functional/clients/status_backend.py index 6fb7313cb24..aa2f6a530ec 100644 --- a/tests-functional/clients/status_backend.py +++ b/tests-functional/clients/status_backend.py @@ -1,6 +1,8 @@ import json import logging +import time from datetime import datetime +from json import JSONDecodeError import jsonschema import requests @@ -27,7 +29,7 @@ def _check_decode_and_key_errors_in_response(self, response, key): f"Key '{key}' not found in the JSON response: {response.content}") def verify_is_valid_json_rpc_response(self, response, _id=None): - assert response.status_code == 200 + assert response.status_code == 200, f"Got response {response.content}, status code {response.status_code}" assert response.content self._check_decode_and_key_errors_in_response(response, "result") @@ -53,23 +55,26 @@ def rpc_request(self, method, params=[], request_id=13, url=None): data["params"] = params logging.info(f"Sending POST request to url {url} with data: {json.dumps(data, sort_keys=True, indent=4)}") response = self.client.post(url, json=data) - + try: + logging.info(f"Got response: {json.dumps(response.json(), sort_keys=True, indent=4)}") + except JSONDecodeError: + logging.info(f"Got response: {response.content}") return response def rpc_valid_request(self, method, params=[], _id=None, url=None): response = self.rpc_request(method, params, _id, url) self.verify_is_valid_json_rpc_response(response, _id) return response - + def verify_json_schema(self, response, method): with open(f"{option.base_dir}/schemas/{method}", "r") as schema: - jsonschema.validate(instance=response.json(), + jsonschema.validate(instance=response, schema=json.load(schema)) class StatusBackend(RpcClient, SignalClient): - def __init__(self, await_signals): + def __init__(self, await_signals=list()): self.api_url = f"{option.rpc_url_status_backend}/statusgo" self.ws_url = f"{option.ws_url_status_backend}" @@ -81,12 +86,15 @@ def __init__(self, await_signals): def api_request(self, method, data, url=None): url = url if url else self.api_url url = f"{url}/{method}" + logging.info(f"Sending POST request to url {url} with data: {json.dumps(data, sort_keys=True, indent=4)}") response = requests.post(url, json=data) + 
logging.info(f"Got response: {response.content}") return response def verify_is_valid_api_response(self, response): - assert response.status_code == 200 + assert response.status_code == 200, f"Got response {response.content}, status code {response.status_code}" assert response.content + logging.info(f"Got response: {response.content}") try: assert not response.json()["error"] except json.JSONDecodeError: @@ -119,6 +127,46 @@ def create_account_and_login(self, display_name="Mr_Meeseeks", password=user_1.p } return self.api_valid_request(method, data) + def restore_account_and_login(self, display_name="Mr_Meeseeks", user=user_1): + method = "RestoreAccountAndLogin" + data = { + "rootDataDir": "/", + "displayName": display_name, + "password": user.password, + "mnemonic": user.passphrase, + "customizationColor": "blue", + "testNetworksEnabled": True, + "networkId": 31337, + "networksOverride": [ + { + "ChainID": 31337, + "ChainName": "Anvil", + "DefaultRPCURL": "http://anvil:8545", + "RPCURL": "http://anvil:8545", + "ShortName": "eth", + "NativeCurrencyName": "Ether", + "NativeCurrencySymbol": "ETH", + "NativeCurrencyDecimals": 18, + "IsTest": False, + "Layer": 1, + "Enabled": True + } + ] + } + return self.api_valid_request(method, data) + + def restore_account_and_wait_for_rpc_client_to_start(self, timeout=60): + self.restore_account_and_login() + start_time = time.time() + # ToDo: change this part for waiting for `node.login` signal when websockets are migrated to StatusBackend + while time.time() - start_time <= timeout: + try: + self.rpc_valid_request(method='accounts_getKeypairs') + return + except AssertionError: + time.sleep(3) + raise TimeoutError(f"RPC client was not started after {timeout} seconds") + def start_messenger(self, params=[]): method = "wakuext_startMessenger" response = self.rpc_request(method, params) diff --git a/tests-functional/conftest.py b/tests-functional/conftest.py index b6b7b1c2581..9433da149bc 100644 --- a/tests-functional/conftest.py +++ b/tests-functional/conftest.py @@ -82,6 +82,6 @@ def init_status_backend(): websocket_thread.start() backend_client.init_status_backend() - backend_client.create_account_and_login() + backend_client.restore_account_and_wait_for_rpc_client_to_start() yield backend_client diff --git a/tests-functional/constants.py b/tests-functional/constants.py index 2730d4c3800..e6d3b732da3 100644 --- a/tests-functional/constants.py +++ b/tests-functional/constants.py @@ -1,20 +1,50 @@ +import os +import random from dataclasses import dataclass - +from src.libs.common import create_unique_data_dir @dataclass class Account: address: str private_key: str password: str + passphrase: str - +# User accounts user_1 = Account( address="0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", private_key="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", - password="Strong12345" + password="Strong12345", + passphrase="test test test test test test test test test test test junk" ) user_2 = Account( address="0x70997970c51812dc3a010c7d01b50e0d17dc79c8", private_key="0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", - password="Strong12345" + password="Strong12345", + passphrase="test test test test test test test test test test nest junk" ) + +# Paths and URLs +PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) +STATUS_BACKEND_URL = os.getenv("STATUS_BACKEND_URL", "http://127.0.0.1") +API_REQUEST_TIMEOUT = int(os.getenv("API_REQUEST_TIMEOUT", "15")) + +# Paths relative to project root +DATA_DIR = 
os.path.join(PROJECT_ROOT, "tests-functional/local") +LOCAL_DATA_DIR1 = create_unique_data_dir(DATA_DIR, random.randint(1, 100)) +LOCAL_DATA_DIR2 = create_unique_data_dir(DATA_DIR, random.randint(1, 100)) +RESOURCES_FOLDER = os.path.join(PROJECT_ROOT, "resources") + +# Account payload default values +ACCOUNT_PAYLOAD_DEFAULTS = { + "displayName": "user", + "password": "test_password", + "customizationColor": "primary" +} + +# Network emulation commands +LATENCY_CMD = "sudo tc qdisc add dev eth0 root netem delay 1s 100ms distribution normal" +PACKET_LOSS_CMD = "sudo tc qdisc add dev eth0 root netem loss 50%" +LOW_BANDWIDTH_CMD = "sudo tc qdisc add dev eth0 root tbf rate 1kbit burst 1kbit" +REMOVE_TC_CMD = "sudo tc qdisc del dev eth0 root" +NUM_CONTACT_REQUESTS = 5 \ No newline at end of file diff --git a/tests-functional/docker-compose.test.status-go.yml b/tests-functional/docker-compose.test.status-go.yml index 044f1c0904b..8eb00a92086 100644 --- a/tests-functional/docker-compose.test.status-go.yml +++ b/tests-functional/docker-compose.test.status-go.yml @@ -16,9 +16,6 @@ services: "--password", "Strong12345", "--dir", "/tmp/status-go-data", # Keep in sync with `config.json/DataDir` value. Later this arg will not be needed. ] - ports: - - 3333:3333 - # - 8354:8354 # use for local debbuging only healthcheck: test: ["CMD-SHELL", "curl -X POST --data '{\"jsonrpc\":\"2.0\",\"method\":\"net_version\",\"params\":[],\"id\":1}' -H 'Content-Type: application/json' http://0.0.0.0:3333 || exit 1"] interval: 5s diff --git a/tests-functional/pytest.ini b/tests-functional/pytest.ini index 505fab3c83b..c2a4e479b1d 100644 --- a/tests-functional/pytest.ini +++ b/tests-functional/pytest.ini @@ -12,3 +12,5 @@ markers = accounts ethclient init + transaction + create_account diff --git a/tests-functional/schemas/accounts_getKeypairs b/tests-functional/schemas/accounts_getKeypairs index 28b25ed394e..1a4aa5623e9 100644 --- a/tests-functional/schemas/accounts_getKeypairs +++ b/tests-functional/schemas/accounts_getKeypairs @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/accounts_hasPairedDevices b/tests-functional/schemas/accounts_hasPairedDevices index a0a3e5dbea7..25947679ed7 100644 --- a/tests-functional/schemas/accounts_hasPairedDevices +++ b/tests-functional/schemas/accounts_hasPairedDevices @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/accounts_remainingAccountCapacity b/tests-functional/schemas/accounts_remainingAccountCapacity index a34c43a2300..d7d0c480d03 100644 --- a/tests-functional/schemas/accounts_remainingAccountCapacity +++ b/tests-functional/schemas/accounts_remainingAccountCapacity @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/signal_mediaserver_started b/tests-functional/schemas/signal_mediaserver_started new file mode 100644 index 00000000000..f9a5c5a5d25 --- /dev/null +++ b/tests-functional/schemas/signal_mediaserver_started @@ -0,0 +1,24 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "mediaserver.started" + }, + "event": { + "type": "object", + 
"properties": { + "port": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + } + }, + "required": ["port"], + "additionalProperties": false + } + }, + "required": ["type", "event"], + "additionalProperties": false +} \ No newline at end of file diff --git a/tests-functional/schemas/signal_node_login b/tests-functional/schemas/signal_node_login new file mode 100644 index 00000000000..f22ddccc7dd --- /dev/null +++ b/tests-functional/schemas/signal_node_login @@ -0,0 +1,116 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "node.login" + }, + "event": { + "type": "object", + "properties": { + "settings": { + "type": "object", + "properties": { + "address": { "type": "string" }, + "currency": { "type": "string" }, + "networks/current-network": { "type": "string" }, + "dapps-address": { "type": "string" }, + "device-name": { "type": "string" }, + "display-name": { "type": "string" }, + "eip1581-address": { "type": "string" }, + "installation-id": { "type": "string" }, + "key-uid": { "type": "string" }, + "latest-derived-path": { "type": "integer" }, + "link-preview-request-enabled": { "type": "boolean" }, + "messages-from-contacts-only": { "type": "boolean" }, + "mnemonic": { "type": "string" }, + "mutual-contact-enabled?": { "type": "boolean" }, + "name": { "type": "string" }, + "networks/networks": { "type": "array" }, + "photo-path": { "type": "string" }, + "preview-privacy?": { "type": "boolean" }, + "public-key": { "type": "string" }, + "signing-phrase": { "type": "string" }, + "default-sync-period": { "type": "integer" }, + "send-push-notifications?": { "type": "boolean" }, + "appearance": { "type": "integer" }, + "profile-pictures-show-to": { "type": "integer" }, + "profile-pictures-visibility": { "type": "integer" }, + "use-mailservers?": { "type": "boolean" }, + "wallet-root-address": { "type": "string" }, + "send-status-updates?": { "type": "boolean" }, + "current-user-status": { + "type": "object", + "properties": { + "publicKey": { "type": "string" }, + "statusType": { "type": "integer" }, + "clock": { "type": "integer" }, + "text": { "type": "string" } + }, + "required": ["publicKey", "statusType", "clock", "text"] + }, + "gifs/recent-gifs": { "type": ["null", "array"] }, + "gifs/favorite-gifs": { "type": ["null", "array"] }, + "last-backup": { "type": "integer" }, + "backup-enabled?": { "type": "boolean" }, + "gifs/api-key": { "type": "string" }, + "show-community-asset-when-sending-tokens?": { "type": "boolean" }, + "display-assets-below-balance-threshold": { "type": "integer" }, + "url-unfurling-mode": { "type": "integer" }, + "compressedKey": { "type": "string" }, + "emojiHash": { + "type": "array", + "items": { "type": "string" } + } + }, + "required": [ + "address", "currency", "networks/current-network", "dapps-address", + "device-name", "display-name", "eip1581-address", "installation-id", + "key-uid", "latest-derived-path", "link-preview-request-enabled", + "messages-from-contacts-only", "mutual-contact-enabled?", + "name", "networks/networks", "photo-path", "preview-privacy?", + "public-key", "signing-phrase", "default-sync-period", + "send-push-notifications?", "appearance", "profile-pictures-show-to", + "profile-pictures-visibility", "use-mailservers?", "wallet-root-address", + "send-status-updates?", "current-user-status", "gifs/recent-gifs", + "gifs/favorite-gifs", "last-backup", "backup-enabled?", "gifs/api-key", + "show-community-asset-when-sending-tokens?", + 
"display-assets-below-balance-threshold", "url-unfurling-mode", + "compressedKey", "emojiHash" + ] + }, + "account": { + "type": "object", + "properties": { + "name": { "type": "string" }, + "timestamp": { "type": "integer" }, + "identicon": { "type": "string" }, + "colorHash": { + "type": "array", + "items": { + "type": "array", + "items": { "type": "integer" }, + "minItems": 2, + "maxItems": 2 + } + }, + "colorId": { "type": "integer" }, + "customizationColor": { "type": "string" }, + "keycard-pairing": { "type": "string" }, + "key-uid": { "type": "string" }, + "images": { "type": ["null", "array"] }, + "kdfIterations": { "type": "integer" } + }, + "required": [ + "name", "timestamp", "identicon", "colorHash", "colorId", + "customizationColor", "keycard-pairing", "key-uid", "images", + "kdfIterations" + ] + } + }, + "required": ["settings", "account"] + } + }, + "required": ["type", "event"] +} \ No newline at end of file diff --git a/tests-functional/schemas/signal_node_ready b/tests-functional/schemas/signal_node_ready new file mode 100644 index 00000000000..cba554d56c1 --- /dev/null +++ b/tests-functional/schemas/signal_node_ready @@ -0,0 +1,15 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "node.ready" + }, + "event": { + "type": "null" + } + }, + "required": ["type", "event"], + "additionalProperties": false +} \ No newline at end of file diff --git a/tests-functional/schemas/signal_node_started b/tests-functional/schemas/signal_node_started new file mode 100644 index 00000000000..7c6e6ed5c4f --- /dev/null +++ b/tests-functional/schemas/signal_node_started @@ -0,0 +1,15 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "node.started" + }, + "event": { + "type": "null" + } + }, + "required": ["type", "event"], + "additionalProperties": false +} \ No newline at end of file diff --git a/tests-functional/schemas/wakuext_peers b/tests-functional/schemas/wakuext_peers index c64fd868bab..69b3348dd6b 100644 --- a/tests-functional/schemas/wakuext_peers +++ b/tests-functional/schemas/wakuext_peers @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "type": "object", "properties": { "id": { diff --git a/tests-functional/schemas/wallet_checkRecentHistoryForChainIDs b/tests-functional/schemas/wallet_checkRecentHistoryForChainIDs index 242060c353e..81352045167 100644 --- a/tests-functional/schemas/wallet_checkRecentHistoryForChainIDs +++ b/tests-functional/schemas/wallet_checkRecentHistoryForChainIDs @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/wallet_createMultiTransaction/transferTx_error b/tests-functional/schemas/wallet_createMultiTransaction/transferTx_error index e2e78a1f728..631c21c4764 100644 --- a/tests-functional/schemas/wallet_createMultiTransaction/transferTx_error +++ b/tests-functional/schemas/wallet_createMultiTransaction/transferTx_error @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "error": { "properties": { diff --git a/tests-functional/schemas/wallet_createMultiTransaction/transferTx_positive b/tests-functional/schemas/wallet_createMultiTransaction/transferTx_positive index 
6052e4b98ce..a261cdb4ab8 100644 --- a/tests-functional/schemas/wallet_createMultiTransaction/transferTx_positive +++ b/tests-functional/schemas/wallet_createMultiTransaction/transferTx_positive @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "integer" diff --git a/tests-functional/schemas/wallet_fetchAllCurrencyFormats b/tests-functional/schemas/wallet_fetchAllCurrencyFormats index ce737e53fd6..db80607f4e6 100644 --- a/tests-functional/schemas/wallet_fetchAllCurrencyFormats +++ b/tests-functional/schemas/wallet_fetchAllCurrencyFormats @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/wallet_getCachedCurrencyFormats b/tests-functional/schemas/wallet_getCachedCurrencyFormats index 8ce520bc107..7e316f04e6c 100644 --- a/tests-functional/schemas/wallet_getCachedCurrencyFormats +++ b/tests-functional/schemas/wallet_getCachedCurrencyFormats @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/wallet_getCryptoOnRamps b/tests-functional/schemas/wallet_getCryptoOnRamps index 9168aabc76a..17aff0f40f6 100644 --- a/tests-functional/schemas/wallet_getCryptoOnRamps +++ b/tests-functional/schemas/wallet_getCryptoOnRamps @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/wallet_getEthereumChains b/tests-functional/schemas/wallet_getEthereumChains index b0e3cecc1db..9206c30f43a 100644 --- a/tests-functional/schemas/wallet_getEthereumChains +++ b/tests-functional/schemas/wallet_getEthereumChains @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" @@ -74,7 +74,7 @@ "type": "object" }, "Test": { - "type": "null" + "type": ["null", "object"] } }, "required": [ diff --git a/tests-functional/schemas/wallet_getPendingTransactions b/tests-functional/schemas/wallet_getPendingTransactions index 64a3d80d59e..ec94250f3f0 100644 --- a/tests-functional/schemas/wallet_getPendingTransactions +++ b/tests-functional/schemas/wallet_getPendingTransactions @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/wallet_getPendingTransactionsForIdentities b/tests-functional/schemas/wallet_getPendingTransactionsForIdentities index 64a3d80d59e..ec94250f3f0 100644 --- a/tests-functional/schemas/wallet_getPendingTransactionsForIdentities +++ b/tests-functional/schemas/wallet_getPendingTransactionsForIdentities @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/schemas/wallet_getTokenList b/tests-functional/schemas/wallet_getTokenList index 2a8a7dbbafe..8c430c8c13d 100644 --- a/tests-functional/schemas/wallet_getTokenList +++ b/tests-functional/schemas/wallet_getTokenList @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { 
"type": "string" diff --git a/tests-functional/schemas/wallet_startWallet b/tests-functional/schemas/wallet_startWallet index 242060c353e..3be76fedf07 100644 --- a/tests-functional/schemas/wallet_startWallet +++ b/tests-functional/schemas/wallet_startWallet @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/schema#", + "$schema": "http://json-schema.org/draft-07/schema", "properties": { "id": { "type": "string" diff --git a/tests-functional/src/__init__.py b/tests-functional/src/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests-functional/src/libs/__init__.py b/tests-functional/src/libs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests-functional/src/libs/base_api_client.py b/tests-functional/src/libs/base_api_client.py new file mode 100644 index 00000000000..0ae92d2a907 --- /dev/null +++ b/tests-functional/src/libs/base_api_client.py @@ -0,0 +1,28 @@ +import requests +import json +from tenacity import retry, stop_after_delay, wait_fixed +from src.libs.custom_logger import get_custom_logger + +logger = get_custom_logger(__name__) + +class BaseAPIClient: + def __init__(self, base_url): + self.base_url = base_url + + @retry(stop=stop_after_delay(10), wait=wait_fixed(0.5), reraise=True) + def send_post_request(self, endpoint, payload=None, headers=None, timeout=10): + if headers is None: + headers = {"Content-Type": "application/json"} + if payload is None: + payload = {} + + url = f"{self.base_url}/{endpoint}" + logger.info(f"Sending POST request to {url} with payload: {json.dumps(payload)}") + try: + response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=timeout) + response.raise_for_status() + logger.info(f"Response received: {response.status_code} - {response.text}") + return response.json() + except requests.exceptions.RequestException as e: + logger.error(f"Request to {url} failed: {str(e)}") + raise diff --git a/tests-functional/src/libs/common.py b/tests-functional/src/libs/common.py new file mode 100644 index 00000000000..4faa384738f --- /dev/null +++ b/tests-functional/src/libs/common.py @@ -0,0 +1,28 @@ +from time import sleep +from src.libs.custom_logger import get_custom_logger +import os +import allure +import uuid + +logger = get_custom_logger(__name__) + + +def attach_allure_file(file): + logger.debug(f"Attaching file {file}") + allure.attach.file(file, name=os.path.basename(file), attachment_type=allure.attachment_type.TEXT) + + +def delay(num_seconds): + logger.debug(f"Sleeping for {num_seconds} seconds") + sleep(num_seconds) + +def create_unique_data_dir(base_dir: str, index: int) -> str: + """Generate a unique data directory for each node instance.""" + unique_id = str(uuid.uuid4())[:8] + unique_dir = os.path.join(base_dir, f"data_{index}_{unique_id}") + os.makedirs(unique_dir, exist_ok=True) + return unique_dir + +def get_project_root() -> str: + """Returns the root directory of the project.""" + return os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")) diff --git a/tests-functional/src/libs/custom_logger.py b/tests-functional/src/libs/custom_logger.py new file mode 100644 index 00000000000..ec2f8e567b6 --- /dev/null +++ b/tests-functional/src/libs/custom_logger.py @@ -0,0 +1,24 @@ +import logging + +max_log_line_length = 10000 + + +def log_length_filter(max_length): + class logLengthFilter(logging.Filter): + def filter(self, record): + if len(record.getMessage()) > max_length: + logging.getLogger(record.name).log( + record.levelno, f"Log line was discarded 
because it's longer than max_log_line_length={max_log_line_length}" + ) + return False + return True + + return logLengthFilter() + + +def get_custom_logger(name): + logging.getLogger("urllib3").setLevel(logging.WARNING) + logging.getLogger("docker").setLevel(logging.WARNING) + logger = logging.getLogger(name) + logger.addFilter(log_length_filter(max_log_line_length)) + return logger diff --git a/tests-functional/src/node/__init__.py b/tests-functional/src/node/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests-functional/src/node/rpc_client.py b/tests-functional/src/node/rpc_client.py new file mode 100644 index 00000000000..e8c4fd888ee --- /dev/null +++ b/tests-functional/src/node/rpc_client.py @@ -0,0 +1,94 @@ +from src.libs.base_api_client import BaseAPIClient +from constants import * +from src.libs.custom_logger import get_custom_logger +from tenacity import retry, stop_after_attempt, wait_fixed + +logger = get_custom_logger(__name__) + + +class StatusNodeRPC(BaseAPIClient): + def __init__(self, port, node_name): + super().__init__(f"http://127.0.0.1:{port}/statusgo") + self.node_name = node_name + + @retry( + stop=stop_after_attempt(3), + wait=wait_fixed(1), + reraise=True + ) + def send_rpc_request(self, method, params=None, timeout=API_REQUEST_TIMEOUT): + """Send JSON-RPC requests, used for standard JSON-RPC API calls.""" + payload = {"jsonrpc": "2.0", "method": method, "params": params or [], "id": 1} + logger.info(f"Sending JSON-RPC request to {self.base_url} with payload: {payload}") + + response = self.send_post_request("CallRPC", payload, timeout=timeout) + + if response.get("error"): + logger.error(f"RPC request failed with error: {response['error']}") + raise RuntimeError(f"RPC request failed with error: {response['error']}") + + return response + + @retry( + stop=stop_after_attempt(3), + wait=wait_fixed(1), + reraise=True + ) + def initialize_application(self, data_dir, timeout=API_REQUEST_TIMEOUT): + """Send a direct POST request to the InitializeApplication endpoint.""" + payload = {"dataDir": data_dir} + logger.info(f"Sending direct POST request to InitializeApplication with payload: {payload}") + + response = self.send_post_request("InitializeApplication", payload, timeout=timeout) + + if response.get("error"): + logger.error(f"InitializeApplication request failed with error: {response['error']}") + raise RuntimeError(f"Failed to initialize application: {response['error']}") + + return response + + @retry( + stop=stop_after_attempt(3), + wait=wait_fixed(1), + reraise=True + ) + def create_account_and_login(self, account_data, timeout=API_REQUEST_TIMEOUT): + """Send a direct POST request to CreateAccountAndLogin endpoint.""" + payload = { + "rootDataDir": account_data.get("rootDataDir"), + "displayName": account_data.get("displayName", "test1"), + "password": account_data.get("password", "test1"), + "customizationColor": account_data.get("customizationColor", "primary") + } + logger.info(f"Sending direct POST request to CreateAccountAndLogin with payload: {payload}") + + response = self.send_post_request("CreateAccountAndLogin", payload, timeout=timeout) + + if response.get("error"): + logger.error(f"CreateAccountAndLogin request failed with error: {response['error']}") + raise RuntimeError(f"Failed to create account and login: {response['error']}") + + return response + + @retry( + stop=stop_after_attempt(3), + wait=wait_fixed(1), + reraise=True + ) + def start_messenger(self, timeout=API_REQUEST_TIMEOUT): + """Send JSON-RPC request to 
start Waku messenger.""" + payload = { + "jsonrpc": "2.0", + "method": "wakuext_startMessenger", + "params": [], + "id": 1 + } + logger.info(f"Sending JSON-RPC request to start Waku messenger: {payload}") + + response = self.send_post_request("CallRPC", payload, timeout=timeout) + + if response.get("error"): + logger.error(f"Starting Waku messenger failed with error: {response['error']}") + raise RuntimeError(f"Failed to start Waku messenger: {response['error']}") + + return response diff --git a/tests-functional/src/node/status_node.py new file mode 100644 index 00000000000..dd05027644d --- /dev/null +++ b/tests-functional/src/node/status_node.py @@ -0,0 +1,173 @@ +import os +import random +import shutil +import signal +import string +import subprocess +import threading +import time + +from clients.status_backend import RpcClient +from conftest import option +from src.libs.custom_logger import get_custom_logger +from src.node.rpc_client import StatusNodeRPC +from clients.signals import SignalClient + +logger = get_custom_logger(__name__) + + +class StatusNode: + def __init__(self, name=None, port=None, pubkey=None): + self.data_dir = None + try: + os.remove(f"{name}.log") + except OSError: + pass + self.name = self.random_node_name() if not name else name.lower() + self.port = str(random.randint(1024, 65535)) if not port else port + self.pubkey = pubkey + self.process = None + self.log_thread = None + self.capture_logs = True + self.logs = [] + self.pid = None + self.signal_client = None + self.last_response = None + self.api = StatusNodeRPC(self.port, self.name) + + def setup_method(self): + # Set up RPC client + self.rpc_client = RpcClient(option.rpc_url_statusd) + # Set up WebSocket signal client + await_signals = ["history.request.started", "history.request.completed"] + self.signal_client = SignalClient(option.ws_url_statusd, await_signals) + + # Start WebSocket connection in a separate thread + websocket_thread = threading.Thread(target=self.signal_client._connect) + websocket_thread.daemon = True + websocket_thread.start() + + def initialize_node(self, name, port, data_dir, account_data): + """Centralized method to initialize a node.""" + self.name = name + self.port = port + self.start(data_dir) + self.wait_fully_started() + self.create_account_and_login(account_data) + self.start_messenger() + self.pubkey = self.get_pubkey(account_data["displayName"]) + + def start_node(self, command): + """Start the node using a subprocess command.""" + logger.info(f"Starting node with command: {command}") + self.process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + self.pid = self.process.pid + self.log_thread = self.capture_process_logs(self.process, self.logs) + + def start(self, data_dir, capture_logs=True): + """Start the status-backend node and initialize it before subscribing to signals.""" + self.capture_logs = capture_logs + self.data_dir = data_dir + command = ["./status-backend", f"--address=localhost:{self.port}"] + self.start_node(command) + self.wait_fully_started() + self.last_response = self.api.initialize_application(data_dir) + self.api = StatusNodeRPC(self.port, self.name) + self.start_signal_client() + + def create_account_and_login(self, account_data): + """Create an account and log in using the status-backend.""" + logger.info(f"Creating account and logging in for node {self.name}") + self.api.create_account_and_login(account_data) + + def start_messenger(self): + """Start the
Waku messenger.""" + logger.info(f"Starting Waku messenger for node {self.name}") + self.api.start_messenger() + + def start_signal_client(self): + """Start a SignalClient for the given node to listen for WebSocket signals.""" + ws_url = f"ws://localhost:{self.port}" + await_signals = ["history.request.started", "history.request.completed"] + self.signal_client = SignalClient(ws_url, await_signals) + + websocket_thread = threading.Thread(target=self.signal_client._connect) + websocket_thread.daemon = True + websocket_thread.start() + logger.info("WebSocket client started and subscribed to signals.") + + def wait_fully_started(self): + """Wait until the node logs indicate that the server has started.""" + logger.info(f"Waiting for {self.name} to fully start...") + start_time = time.time() + while time.time() - start_time < 20: + if any("server started" in log for log in self.logs): + logger.info(f"Node {self.name} has fully started.") + return + time.sleep(0.5) + raise TimeoutError(f"Node {self.name} did not fully start in time.") + + def capture_process_logs(self, process, logs): + """Capture logs from a subprocess.""" + + def read_output(): + while True: + line = process.stdout.readline() + if not line: + break + logs.append(line.strip()) + logger.debug(f"{self.name.upper()} - {line.strip()}") + + log_thread = threading.Thread(target=read_output) + log_thread.daemon = True + log_thread.start() + return log_thread + + def random_node_name(self, length=10): + """Generate a random node name.""" + allowed_chars = string.ascii_lowercase + string.digits + "_-" + return ''.join(random.choice(allowed_chars) for _ in range(length)) + + def get_pubkey(self, display_name): + """Retrieve public-key based on display name from accounts_getAccounts response.""" + response = self.api.send_rpc_request("accounts_getAccounts") + + accounts = response.get("result", []) + for account in accounts: + if account.get("name") == display_name: + return account.get("public-key") + raise ValueError(f"Public key not found for display name: {display_name}") + + def wait_for_signal(self, signal_type, expected_event=None, timeout=20): + """Wait for a signal using the signal client and validate against expected event details.""" + return self.signal_client.wait_for_signal(signal_type, expected_event, timeout) + + def stop(self, remove_local_data=True): + """Stop the status-backend process.""" + if self.process: + logger.info(f"Stopping node with name: {self.name}") + self.process.kill() + if self.capture_logs: + self.log_thread.join() + if remove_local_data: + node_dir = f"test-{self.name}" + if os.path.exists(node_dir): + try: + shutil.rmtree(node_dir) + except Exception as ex: + logger.warning(f"Couldn't delete node dir {node_dir} because of {str(ex)}") + self.process = None + + def send_contact_request(self, pubkey, message): + params = [{"id": pubkey, "message": message}] + return self.api.send_rpc_request("wakuext_sendContactRequest", params) + + def pause_process(self): + if self.pid: + logger.info(f"Pausing node with pid: {self.pid}") + os.kill(self.pid, signal.SIGTSTP) + + def resume_process(self): + if self.pid: + logger.info(f"Resuming node with pid: {self.pid}") + os.kill(self.pid, signal.SIGCONT) diff --git a/tests-functional/src/steps/__init__.py b/tests-functional/src/steps/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests-functional/src/steps/common.py b/tests-functional/src/steps/common.py new file mode 100644 index 00000000000..6393cef275b --- /dev/null +++ 
b/tests-functional/src/steps/common.py @@ -0,0 +1,118 @@ +from contextlib import contextmanager +import inspect +import random +import subprocess +import pytest +from src.libs.common import delay +from src.libs.custom_logger import get_custom_logger +from src.node.status_node import StatusNode +from datetime import datetime +from constants import * + +logger = get_custom_logger(__name__) + + +class StepsCommon: + @pytest.fixture(scope="function", autouse=False) + def start_1_node(self): + account_data = { + **ACCOUNT_PAYLOAD_DEFAULTS, + "rootDataDir": LOCAL_DATA_DIR1, + "displayName": "first_node_user" + } + random_port = str(random.randint(1024, 65535)) + + self.first_node = StatusNode() + self.first_node.initialize_node("first_node", random_port, LOCAL_DATA_DIR1, account_data) + self.first_node_pubkey = self.first_node.get_pubkey("first_node_user") + + @pytest.fixture(scope="function", autouse=False) + def start_2_nodes(self): + logger.debug(f"Running fixture setup: {inspect.currentframe().f_code.co_name}") + + account_data_first = { + **ACCOUNT_PAYLOAD_DEFAULTS, + "rootDataDir": LOCAL_DATA_DIR1, + "displayName": "first_node_user" + } + account_data_second = { + **ACCOUNT_PAYLOAD_DEFAULTS, + "rootDataDir": LOCAL_DATA_DIR2, + "displayName": "second_node_user" + } + + self.first_node = StatusNode(name="first_node") + self.first_node.start(data_dir=LOCAL_DATA_DIR1) + self.first_node.wait_fully_started() + + self.second_node = StatusNode(name="second_node") + self.second_node.start(data_dir=LOCAL_DATA_DIR2) + self.second_node.wait_fully_started() + + self.first_node.create_account_and_login(account_data_first) + self.second_node.create_account_and_login(account_data_second) + + delay(4) + self.first_node.start_messenger() + delay(1) + self.second_node.start_messenger() + + self.first_node_pubkey = self.first_node.get_pubkey("first_node_user") + self.second_node_pubkey = self.second_node.get_pubkey("second_node_user") + + logger.debug(f"First Node Public Key: {self.first_node_pubkey}") + logger.debug(f"Second Node Public Key: {self.second_node_pubkey}") + + @contextmanager + def add_latency(self): + """Add network latency""" + logger.debug("Adding network latency") + subprocess.Popen(LATENCY_CMD, shell=True) + try: + yield + finally: + logger.debug("Removing network latency") + subprocess.Popen(REMOVE_TC_CMD, shell=True) + + @contextmanager + def add_packet_loss(self): + """Add packet loss""" + logger.debug("Adding packet loss") + subprocess.Popen(PACKET_LOSS_CMD, shell=True) + try: + yield + finally: + logger.debug("Removing packet loss") + subprocess.Popen(REMOVE_TC_CMD, shell=True) + + @contextmanager + def add_low_bandwidth(self): + """Add low bandwidth""" + logger.debug("Adding low bandwidth") + subprocess.Popen(LOW_BANDWIDTH_CMD, shell=True) + try: + yield + finally: + logger.debug("Removing low bandwidth") + subprocess.Popen(REMOVE_TC_CMD, shell=True) + + @contextmanager + def node_pause(self, node): + logger.debug("Entering context manager: node_pause") + node.pause_process() + try: + yield + finally: + logger.debug("Exiting context manager: node_pause") + node.resume_process() + + def send_with_timestamp(self, send_method, id, message): + timestamp = datetime.now().strftime("%H:%M:%S") + response = send_method(id, message) + response_messages = response["result"]["messages"] + message_id = None + for m in response_messages: + if m["text"] == message: + message_id = m["id"] + break + return timestamp, message_id \ No newline at end of file
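[review note] The LATENCY_CMD, PACKET_LOSS_CMD, LOW_BANDWIDTH_CMD, and REMOVE_TC_CMD strings consumed by the context managers above are defined in tests-functional/constants.py, which this diff does not include. For reviewers, a sketch of the shape such Linux tc/netem one-liners typically take; the interface name and the numbers are placeholders, not the values this PR ships:

```python
# Illustrative only: plausible definitions for the traffic-control commands
# referenced by StepsCommon. The real values live in tests-functional/constants.py.
LATENCY_CMD = "sudo tc qdisc add dev eth0 root netem delay 1s 20ms distribution normal"
PACKET_LOSS_CMD = "sudo tc qdisc add dev eth0 root netem loss 50%"
LOW_BANDWIDTH_CMD = "sudo tc qdisc add dev eth0 root tbf rate 1mbit burst 32kbit latency 400ms"
REMOVE_TC_CMD = "sudo tc qdisc del dev eth0 root"
```

diff --git a/tests-functional/tests/test_accounts.py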
b/tests-functional/tests/test_accounts.py index 0856690182d..0b3c35030ea 100644 --- a/tests-functional/tests/test_accounts.py +++ b/tests-functional/tests/test_accounts.py @@ -2,12 +2,12 @@ import pytest -from test_cases import StatusDTestCase +from test_cases import StatusBackendTestCase @pytest.mark.accounts @pytest.mark.rpc -class TestAccounts(StatusDTestCase): +class TestAccounts(StatusBackendTestCase): @pytest.mark.parametrize( "method, params", @@ -22,4 +22,4 @@ def test_(self, method, params): _id = str(random.randint(1, 8888)) response = self.rpc_client.rpc_valid_request(method, params, _id) - self.rpc_client.verify_json_schema(response, method) + self.rpc_client.verify_json_schema(response.json(), method) diff --git a/tests-functional/tests/test_cases.py b/tests-functional/tests/test_cases.py index a6f60ac9f80..ef77c85e023 100644 --- a/tests-functional/tests/test_cases.py +++ b/tests-functional/tests/test_cases.py @@ -7,7 +7,7 @@ import pytest from clients.signals import SignalClient -from clients.status_backend import RpcClient +from clients.status_backend import RpcClient, StatusBackend from conftest import option from constants import user_1, user_2 @@ -21,14 +21,17 @@ def setup_method(self): ) -class WalletTestCase(StatusDTestCase): +class StatusBackendTestCase: + def setup_class(self): + self.rpc_client = StatusBackend() + self.network_id = 31337 - def setup_method(self): - super().setup_method() + +class WalletTestCase(StatusBackendTestCase): def wallet_create_multi_transaction(self, **kwargs): method = "wallet_createMultiTransaction" - transferTx_data = { + transfer_tx_data = { "data": "", "from": user_1.address, "gas": "0x5BBF", @@ -40,8 +43,8 @@ def wallet_create_multi_transaction(self, **kwargs): "value": "0x5af3107a4000", } for key, new_value in kwargs.items(): - if key in transferTx_data: - transferTx_data[key] = new_value + if key in transfer_tx_data: + transfer_tx_data[key] = new_value else: logging.info( f"Warning: The key '{key}' does not exist in the transferTx parameters and will be ignored.") @@ -58,7 +61,7 @@ def wallet_create_multi_transaction(self, **kwargs): { "bridgeName": "Transfer", "chainID": 31337, - "transferTx": transferTx_data + "transferTx": transfer_tx_data } ], f"{option.password}", @@ -81,7 +84,6 @@ def send_valid_multi_transaction(self, **kwargs): class TransactionTestCase(WalletTestCase): def setup_method(self): - super().setup_method() self.tx_hash = self.send_valid_multi_transaction() @@ -89,10 +91,6 @@ class EthRpcTestCase(WalletTestCase): @pytest.fixture(autouse=True, scope='class') def tx_data(self): - self.rpc_client = RpcClient( - option.rpc_url_statusd - ) - tx_hash = self.send_valid_multi_transaction() self.wait_until_tx_not_pending(tx_hash) @@ -103,8 +101,8 @@ def tx_data(self): except (KeyError, json.JSONDecodeError): raise Exception(receipt.content) - TxData = namedtuple("TxData", ["tx_hash", "block_number", "block_hash"]) - return TxData(tx_hash, block_number, block_hash) + tx_data = namedtuple("TxData", ["tx_hash", "block_number", "block_hash"]) + return tx_data(tx_hash, block_number, block_hash) def get_block_header(self, block_number): method = "ethclient_headerByNumber" @@ -142,9 +140,3 @@ def setup_method(self): websocket_thread = threading.Thread(target=self.signal_client._connect) websocket_thread.daemon = True websocket_thread.start() - - -class StatusBackendTestCase: - - def setup_method(self): - pass diff --git a/tests-functional/tests/test_contact_request.py b/tests-functional/tests/test_contact_request.py new file mode 
100644 index 00000000000..0490366e873 --- /dev/null +++ b/tests-functional/tests/test_contact_request.py @@ -0,0 +1,125 @@ +import logging +import os +from uuid import uuid4 +from constants import * +from src.libs.common import delay +from src.node.status_node import StatusNode, logger +from src.steps.common import StepsCommon +from src.libs.common import create_unique_data_dir, get_project_root +from validators.contact_request_validator import ContactRequestValidator + + +class TestContactRequest(StepsCommon): + def test_contact_request_baseline(self): + timeout_secs = 180 + num_contact_requests = NUM_CONTACT_REQUESTS + project_root = get_project_root() + nodes = [] + + for index in range(num_contact_requests): + first_node = StatusNode(name=f"first_node_{index}") + second_node = StatusNode(name=f"second_node_{index}") + + data_dir_first = create_unique_data_dir(os.path.join(project_root, "tests-functional/local"), index) + data_dir_second = create_unique_data_dir(os.path.join(project_root, "tests-functional/local"), index) + + delay(2) + first_node.start(data_dir=data_dir_first) + second_node.start(data_dir=data_dir_second) + + account_data_first = { + "rootDataDir": data_dir_first, + "displayName": f"test_user_first_{index}", + "password": f"test_password_first_{index}", + "customizationColor": "primary" + } + account_data_second = { + "rootDataDir": data_dir_second, + "displayName": f"test_user_second_{index}", + "password": f"test_password_second_{index}", + "customizationColor": "primary" + } + first_node.create_account_and_login(account_data_first) + second_node.create_account_and_login(account_data_second) + + delay(5) + first_node.start_messenger() + second_node.start_messenger() + + first_node.pubkey = first_node.get_pubkey(account_data_first["displayName"]) + second_node.pubkey = second_node.get_pubkey(account_data_second["displayName"]) + + first_node.wait_fully_started() + second_node.wait_fully_started() + + nodes.append((first_node, second_node, account_data_first["displayName"], index)) + + # Validate contact requests + missing_contact_requests = [] + for first_node, second_node, display_name, index in nodes: + result = self.send_and_wait_for_message((first_node, second_node), display_name, index, timeout_secs) + timestamp, message_id, contact_request_message, response = result + + if not response: + missing_contact_requests.append((timestamp, contact_request_message, message_id)) + else: + validator = ContactRequestValidator(response) + validator.run_all_validations( + expected_chat_id=first_node.pubkey, + expected_display_name=display_name, + expected_text=f"contact_request_{index}" + ) + + if missing_contact_requests: + formatted_missing_requests = [ + f"Timestamp: {ts}, Message: {msg}, ID: {mid}" for ts, msg, mid in missing_contact_requests + ] + raise AssertionError( + f"{len(missing_contact_requests)} contact requests out of {num_contact_requests} didn't reach the peer node: " + + "\n".join(formatted_missing_requests) + ) + + def send_and_wait_for_message(self, nodes, display_name, index, timeout=45): + first_node, second_node = nodes + first_node_pubkey = first_node.get_pubkey(display_name) + contact_request_message = f"contact_request_{index}" + + timestamp, message_id = self.send_with_timestamp( + second_node.send_contact_request, first_node_pubkey, contact_request_message + ) + + response = second_node.send_contact_request(first_node_pubkey, contact_request_message) + + expected_event_started = {"requestId": "", "peerId": "", "batchIndex": 0, "numBatches": 1} +
expected_event_completed = {"requestId": "", "peerId": "", "batchIndex": 0} + + try: + first_node.wait_for_signal("history.request.started", expected_event_started, timeout) + first_node.wait_for_signal("history.request.completed", expected_event_completed, timeout) + except TimeoutError as e: + logging.error(f"Signal validation failed: {str(e)}") + return timestamp, message_id, contact_request_message, None + + first_node.stop() + second_node.stop() + + return timestamp, message_id, contact_request_message, response + + def test_contact_request_with_latency(self): + with self.add_latency(): + self.test_contact_request_baseline() + + def test_contact_request_with_packet_loss(self): + with self.add_packet_loss(): + self.test_contact_request_baseline() + + def test_contact_request_with_low_bandwidth(self): + with self.add_low_bandwidth(): + self.test_contact_request_baseline() + + def test_contact_request_with_node_pause(self, start_2_nodes): + with self.node_pause(self.second_node): + message = str(uuid4()) + self.first_node.send_contact_request(self.second_node_pubkey, message) + delay(10) + assert self.second_node.wait_for_signal("history.request.completed") diff --git a/tests-functional/tests/test_init_status_app.py b/tests-functional/tests/test_init_status_app.py index 9afb3424cdf..4001dabb5a6 100644 --- a/tests-functional/tests/test_init_status_app.py +++ b/tests-functional/tests/test_init_status_app.py @@ -1,11 +1,9 @@ import pytest -from test_cases import StatusBackendTestCase - @pytest.mark.create_account @pytest.mark.rpc -class TestInitialiseApp(StatusBackendTestCase): +class TestInitialiseApp: @pytest.mark.init def test_init_app(self, init_status_backend): @@ -13,12 +11,12 @@ def test_init_app(self, init_status_backend): backend_client = init_status_backend assert backend_client is not None - mediaserver_started = backend_client.wait_for_signal( - "mediaserver.started") - - port = mediaserver_started['event']['port'] - assert type(port) is int, f"Port is not an integer, found {type(port)}" - - backend_client.wait_for_signal("node.started") - backend_client.wait_for_signal("node.ready") - backend_client.wait_for_signal("node.login") + + backend_client.verify_json_schema( + backend_client.wait_for_signal("mediaserver.started"), "signal_mediaserver_started") + backend_client.verify_json_schema( + backend_client.wait_for_signal("node.started"), "signal_node_started") + backend_client.verify_json_schema( + backend_client.wait_for_signal("node.ready"), "signal_node_ready") + backend_client.verify_json_schema( + backend_client.wait_for_signal("node.login"), "signal_node_login") diff --git a/tests-functional/tests/test_router.py b/tests-functional/tests/test_router.py index bc5008422f3..5af0fa508c3 100644 --- a/tests-functional/tests/test_router.py +++ b/tests-functional/tests/test_router.py @@ -111,5 +111,5 @@ def test_tx_from_route(self): tx_details = response.json()["result"] assert tx_details["value"] == amount_in - assert tx_details["to"] == user_2.address - assert tx_details["from"] == user_1.address + assert tx_details["to"].upper() == user_2.address.upper() + assert tx_details["from"].upper() == user_1.address.upper() diff --git a/tests-functional/tests/test_waku_rpc.py b/tests-functional/tests/test_waku_rpc.py index 3ddf91f0c20..cde29a041d9 100644 --- a/tests-functional/tests/test_waku_rpc.py +++ b/tests-functional/tests/test_waku_rpc.py @@ -6,11 +6,10 @@ import pytest from conftest import option -from test_cases import StatusDTestCase +from test_cases import 
StatusBackendTestCase -@pytest.mark.skip("to be reworked via status-backend") -class TestRpc(StatusDTestCase): +class TestRpc(StatusBackendTestCase): @pytest.mark.parametrize( "method, params", @@ -21,12 +20,12 @@ class TestRpc(StatusDTestCase): def test_(self, method, params): _id = str(random.randint(1, 8888)) - response = self.rpc_client.rpc_valid_request(method, params, _id, url=option.rpc_url_2) - self.rpc_client.verify_json_schema(response, method) + response = self.rpc_client.rpc_valid_request(method, params, _id) + self.rpc_client.verify_json_schema(response.json(), method) @pytest.mark.skip("to be reworked via status-backend") -class TestRpcMessaging(StatusDTestCase): +class TestRpcMessaging(StatusBackendTestCase): @dataclass class User: rpc_url: str @@ -99,5 +98,5 @@ def test_add_contact(self): self.rpc_client.verify_is_valid_json_rpc_response(response) response = response.json() - assert response["result"][0]["added"] == True + assert response["result"][0]["added"] is True assert response["result"][0]["id"] == user[1].chat_public_key diff --git a/tests-functional/tests/test_wallet_rpc.py b/tests-functional/tests/test_wallet_rpc.py index 64e20041d42..968e599b9da 100644 --- a/tests-functional/tests/test_wallet_rpc.py +++ b/tests-functional/tests/test_wallet_rpc.py @@ -5,7 +5,7 @@ import pytest from conftest import option -from test_cases import StatusDTestCase, TransactionTestCase +from test_cases import StatusBackendTestCase, TransactionTestCase @pytest.mark.wallet @@ -34,7 +34,7 @@ def test_tx_(self, method, params): params[0][0]["hash"] = self.tx_hash response = self.rpc_client.rpc_valid_request(method, params, _id) - self.rpc_client.verify_json_schema(response, method) + self.rpc_client.verify_json_schema(response.json(), method) def test_create_multi_transaction(self): response = self.wallet_create_multi_transaction() @@ -71,12 +71,12 @@ def test_create_multi_transaction_validation(self, method, assert expected_error_text in actual_error_text, \ f"got error: {actual_error_text} that does not include: {expected_error_text}" - self.rpc_client.verify_json_schema(response, "wallet_createMultiTransaction/transferTx_error") + self.rpc_client.verify_json_schema(response.json(), "wallet_createMultiTransaction/transferTx_error") @pytest.mark.wallet @pytest.mark.rpc -class TestRpc(StatusDTestCase): +class TestRpc(StatusBackendTestCase): @pytest.mark.parametrize( "method, params", @@ -92,4 +92,4 @@ def test_(self, method, params): _id = str(random.randint(1, 8888)) response = self.rpc_client.rpc_valid_request(method, params, _id) - self.rpc_client.verify_json_schema(response, method) + self.rpc_client.verify_json_schema(response.json(), method) diff --git a/tests-functional/validators/contact_request_validator.py b/tests-functional/validators/contact_request_validator.py new file mode 100644 index 00000000000..81f4bf84120 --- /dev/null +++ b/tests-functional/validators/contact_request_validator.py @@ -0,0 +1,36 @@ +import logging + +from src.steps.common import logger + + +class ContactRequestValidator: + """Validator class for contact request responses.""" + + def __init__(self, response): + self.response = response + + def validate_response_structure(self): + """Check the overall structure of the response.""" + assert self.response.get("jsonrpc") == "2.0", "Invalid JSON-RPC version" + assert "result" in self.response, "Missing 'result' in response" + + def validate_chat_data(self, expected_chat_id, expected_display_name, expected_text): + """Validate the chat data fields in the 
response.""" + chats = self.response["result"].get("chats", []) + assert len(chats) > 0, "No chats found in the response" + + chat = chats[0] # Validate the first chat as an example + assert chat.get("id") == expected_chat_id, f"Chat ID mismatch: Expected {expected_chat_id}" + assert chat.get("name").startswith("0x"), "Invalid chat name format" + + last_message = chat.get("lastMessage", {}) + # assert last_message.get("displayName") == expected_display_name, "Display name mismatch" + assert last_message.get("text") == expected_text, "Message text mismatch" + assert last_message.get("contactRequestState") == 1, "Unexpected contact request state" + assert "compressedKey" in last_message, "Missing 'compressedKey' in last message" + + def run_all_validations(self, expected_chat_id, expected_display_name, expected_text): + """Run all validation methods for the contact request response.""" + self.validate_response_structure() + self.validate_chat_data(expected_chat_id, expected_display_name, expected_text) + logger.info("All validations passed for the contact request response.") \ No newline at end of file diff --git a/timesource/timesource.go b/timesource/timesource.go index 6b85c394822..e21e8ea6832 100644 --- a/timesource/timesource.go +++ b/timesource/timesource.go @@ -8,10 +8,10 @@ import ( "time" "github.com/beevik/ntp" + "go.uber.org/zap" "github.com/status-im/status-go/common" - - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) const ( @@ -162,10 +162,10 @@ func (s *NTPTimeSource) Now() time.Time { func (s *NTPTimeSource) updateOffset() error { offset, err := computeOffset(s.timeQuery, s.servers, s.allowedFailures) if err != nil { - log.Error("failed to compute offset", "error", err) + logutils.ZapLogger().Error("failed to compute offset", zap.Error(err)) return errUpdateOffset } - log.Info("Difference with ntp servers", "offset", offset) + logutils.ZapLogger().Info("Difference with ntp servers", zap.Duration("offset", offset)) s.mu.Lock() defer s.mu.Unlock() s.latestOffset = offset @@ -214,7 +214,7 @@ func (s *NTPTimeSource) Start() { if err != nil { // Failure to update can occur if the node is offline. // Instead of returning an error, continue with the process as the update will be retried periodically. 
- log.Error("failed to update offset", err) + logutils.ZapLogger().Error("failed to update offset", zap.Error(err)) } s.runPeriodically(s.updateOffset, err == nil) diff --git a/transactions/pendingtxtracker.go b/transactions/pendingtxtracker.go index 11bb7415ee3..1b988c69737 100644 --- a/transactions/pendingtxtracker.go +++ b/transactions/pendingtxtracker.go @@ -10,13 +10,15 @@ import ( "strings" "time" + "go.uber.org/zap" + eth "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" ethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/services/rpcfilters" "github.com/status-im/status-go/services/wallet/bigint" @@ -80,7 +82,7 @@ type PendingTxTracker struct { eventFeed *event.Feed taskRunner *ConditionalRepeater - log log.Logger + logger *zap.Logger } func NewPendingTxTracker(db *sql.DB, rpcClient rpc.ClientInterface, rpcFilter *rpcfilters.Service, eventFeed *event.Feed, checkInterval time.Duration) *PendingTxTracker { @@ -89,7 +91,7 @@ func NewPendingTxTracker(db *sql.DB, rpcClient rpc.ClientInterface, rpcFilter *r rpcClient: rpcClient, eventFeed: eventFeed, rpcFilter: rpcFilter, - log: log.New("package", "status-go/transactions.PendingTxTracker"), + logger: logutils.ZapLogger().Named("PendingTxTracker"), } tm.taskRunner = NewConditionalRepeater(checkInterval, func(ctx context.Context) bool { return tm.fetchAndUpdateDB(ctx) @@ -107,10 +109,10 @@ func (tm *PendingTxTracker) fetchAndUpdateDB(ctx context.Context) bool { txs, err := tm.GetAllPending() if err != nil { - tm.log.Error("Failed to get pending transactions", "error", err) + tm.logger.Error("Failed to get pending transactions", zap.Error(err)) return WorkDone } - tm.log.Debug("Checking for PT status", "count", len(txs)) + tm.logger.Debug("Checking for PT status", zap.Int("count", len(txs))) txsMap := make(map[common.ChainID][]eth.Hash) for _, tx := range txs { @@ -121,26 +123,26 @@ func (tm *PendingTxTracker) fetchAndUpdateDB(ctx context.Context) bool { doneCount := 0 // Batch request for each chain for chainID, txs := range txsMap { - tm.log.Debug("Processing PTs", "chainID", chainID, "count", len(txs)) - batchRes, err := fetchBatchTxStatus(ctx, tm.rpcClient, chainID, txs, tm.log) + tm.logger.Debug("Processing PTs", zap.Stringer("chainID", chainID), zap.Int("count", len(txs))) + batchRes, err := fetchBatchTxStatus(ctx, tm.rpcClient, chainID, txs, tm.logger) if err != nil { - tm.log.Error("Failed to batch fetch pending transactions status for", "chainID", chainID, "error", err) + tm.logger.Error("Failed to batch fetch pending transactions status for", zap.Stringer("chainID", chainID), zap.Error(err)) continue } if len(batchRes) == 0 { - tm.log.Debug("No change to PTs status", "chainID", chainID) + tm.logger.Debug("No change to PTs status", zap.Stringer("chainID", chainID)) continue } - tm.log.Debug("PTs done", "chainID", chainID, "count", len(batchRes)) + tm.logger.Debug("PTs done", zap.Stringer("chainID", chainID), zap.Int("count", len(batchRes))) doneCount += len(batchRes) updateRes, err := tm.updateDBStatus(ctx, chainID, batchRes) if err != nil { - tm.log.Error("Failed to update pending transactions status for", "chainID", chainID, "error", err) + tm.logger.Error("Failed to update pending transactions status for", zap.Stringer("chainID", chainID), zap.Error(err)) continue } - 
tm.log.Debug("Emit notifications for PTs", "chainID", chainID, "count", len(updateRes)) + tm.logger.Debug("Emit notifications for PTs", zap.Stringer("chainID", chainID), zap.Int("count", len(updateRes))) tm.emitNotifications(chainID, updateRes) } @@ -148,7 +150,7 @@ func (tm *PendingTxTracker) fetchAndUpdateDB(ctx context.Context) bool { res = WorkDone } - tm.log.Debug("Done PTs iteration", "count", doneCount, "completed", res) + tm.logger.Debug("Done PTs iteration", zap.Int("count", doneCount), zap.Bool("completed", res)) return res } @@ -167,10 +169,10 @@ func (nr *nullableReceipt) UnmarshalJSON(data []byte) error { // fetchBatchTxStatus returns not pending transactions (confirmed or errored) // it excludes the still pending or errored request from the result -func fetchBatchTxStatus(ctx context.Context, rpcClient rpc.ClientInterface, chainID common.ChainID, hashes []eth.Hash, log log.Logger) ([]txStatusRes, error) { +func fetchBatchTxStatus(ctx context.Context, rpcClient rpc.ClientInterface, chainID common.ChainID, hashes []eth.Hash, logger *zap.Logger) ([]txStatusRes, error) { chainClient, err := rpcClient.AbstractEthClient(chainID) if err != nil { - log.Error("Failed to get chain client", "error", err) + logger.Error("Failed to get chain client", zap.Error(err)) return nil, err } @@ -188,7 +190,7 @@ func fetchBatchTxStatus(ctx context.Context, rpcClient rpc.ClientInterface, chai err = chainClient.BatchCallContext(reqCtx, batch) if err != nil { - log.Error("Transactions request fail", "error", err) + logger.Error("Transactions request fail", zap.Error(err)) return nil, err } @@ -196,18 +198,18 @@ func fetchBatchTxStatus(ctx context.Context, rpcClient rpc.ClientInterface, chai for i, b := range batch { err := b.Error if err != nil { - log.Error("Failed to get transaction", "error", err, "hash", hashes[i]) + logger.Error("Failed to get transaction", zap.Stringer("hash", hashes[i]), zap.Error(err)) continue } if b.Result == nil { - log.Error("Transaction not found", "hash", hashes[i]) + logger.Error("Transaction not found", zap.Stringer("hash", hashes[i])) continue } receiptWrapper, ok := b.Result.(*nullableReceipt) if !ok { - log.Error("Failed to cast transaction receipt", "hash", hashes[i]) + logger.Error("Failed to cast transaction receipt", zap.Stringer("hash", hashes[i])) continue } @@ -267,9 +269,9 @@ func (tm *PendingTxTracker) updateDBStatus(ctx context.Context, chainID common.C err = row.Scan(&autoDel) if err != nil { if err == sql.ErrNoRows { - tm.log.Warn("Missing entry while checking for auto_delete", "hash", br.hash) + tm.logger.Warn("Missing entry while checking for auto_delete", zap.Stringer("hash", br.hash)) } else { - tm.log.Error("Failed to retrieve auto_delete for pending transaction", "error", err, "hash", br.hash) + tm.logger.Error("Failed to retrieve auto_delete for pending transaction", zap.Stringer("hash", br.hash), zap.Error(err)) } continue } @@ -277,7 +279,7 @@ func (tm *PendingTxTracker) updateDBStatus(ctx context.Context, chainID common.C if autoDel { notifyFn, err := tm.DeleteBySQLTx(tx, chainID, br.hash) if err != nil && err != ErrStillPending { - tm.log.Error("Failed to delete pending transaction", "error", err, "hash", br.hash) + tm.logger.Error("Failed to delete pending transaction", zap.Stringer("hash", br.hash), zap.Error(err)) continue } notifyFunctions = append(notifyFunctions, notifyFn) @@ -287,17 +289,17 @@ func (tm *PendingTxTracker) updateDBStatus(ctx context.Context, chainID common.C res, err := updateStmt.ExecContext(ctx, txStatus, chainID, 
br.hash) if err != nil { - tm.log.Error("Failed to update pending transaction status", "error", err, "hash", br.hash) + tm.logger.Error("Failed to update pending transaction status", zap.Stringer("hash", br.hash), zap.Error(err)) continue } affected, err := res.RowsAffected() if err != nil { - tm.log.Error("Failed to get updated rows", "error", err, "hash", br.hash) + tm.logger.Error("Failed to get updated rows", zap.Stringer("hash", br.hash), zap.Error(err)) continue } if affected == 0 { - tm.log.Warn("Missing entry to update for", "hash", br.hash) + tm.logger.Warn("Missing entry to update for", zap.Stringer("hash", br.hash)) continue } } @@ -330,7 +332,7 @@ func (tm *PendingTxTracker) emitNotifications(chainID common.ChainID, changes [] jsonPayload, err := json.Marshal(payload) if err != nil { - tm.log.Error("Failed to marshal pending transaction status", "error", err, "hash", change.hash) + tm.logger.Error("Failed to marshal pending transaction status", zap.Stringer("hash", change.hash), zap.Error(err)) continue } tm.eventFeed.Send(walletevent.Event{ @@ -659,7 +661,7 @@ func (tm *PendingTxTracker) addPending(transaction *PendingTransaction) error { func (tm *PendingTxTracker) notifyPendingTransactionListeners(payload PendingTxUpdatePayload, addresses []eth.Address, timestamp uint64) { jsonPayload, err := json.Marshal(payload) if err != nil { - tm.log.Error("Failed to marshal PendingTxUpdatePayload", "error", err, "hash", payload.Hash) + tm.logger.Error("Failed to marshal PendingTxUpdatePayload", zap.Stringer("hash", payload.Hash), zap.Error(err)) return } diff --git a/transactions/transactor.go b/transactions/transactor.go index 5c9ded53879..5c3c3d41472 100644 --- a/transactions/transactor.go +++ b/transactions/transactor.go @@ -10,15 +10,17 @@ import ( "math/big" "time" + "go.uber.org/zap" + ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" gethtypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" "github.com/status-im/status-go/account" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/params" "github.com/status-im/status-go/rpc" "github.com/status-im/status-go/services/wallet/bigint" @@ -68,14 +70,14 @@ type Transactor struct { sendTxTimeout time.Duration rpcCallTimeout time.Duration networkID uint64 - log log.Logger + logger *zap.Logger } // NewTransactor returns a new Manager. 
func NewTransactor() *Transactor { return &Transactor{ sendTxTimeout: sendTxTimeout, - log: log.New("package", "status-go/transactions.Manager"), + logger: logutils.ZapLogger().Named("transactor"), } } @@ -547,21 +549,21 @@ func (t *Transactor) buildTransactionWithOverrides(nonce uint64, value *big.Int, } func (t *Transactor) logNewTx(args SendTxArgs, gas uint64, gasPrice *big.Int, value *big.Int) { - t.log.Info("New transaction", - "From", args.From, - "To", *args.To, - "Gas", gas, - "GasPrice", gasPrice, - "Value", value, + t.logger.Info("New transaction", + zap.Stringer("From", args.From), + zap.Stringer("To", args.To), + zap.Uint64("Gas", gas), + zap.Stringer("GasPrice", gasPrice), + zap.Stringer("Value", value), ) } func (t *Transactor) logNewContract(args SendTxArgs, gas uint64, gasPrice *big.Int, value *big.Int, nonce uint64) { - t.log.Info("New contract", - "From", args.From, - "Gas", gas, - "GasPrice", gasPrice, - "Value", value, - "Contract address", crypto.CreateAddress(args.From, nonce), + t.logger.Info("New contract", + zap.Stringer("From", args.From), + zap.Uint64("Gas", gas), + zap.Stringer("GasPrice", gasPrice), + zap.Stringer("Value", value), + zap.Stringer("Contract address", crypto.CreateAddress(args.From, nonce)), ) } diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE new file mode 100644 index 00000000000..29e1ab6b05f --- /dev/null +++ b/vendor/github.com/segmentio/asm/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
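[review note] Before the vendored segmentio/asm files below: the changes to timesource/ and transactions/ above all apply the same mechanical pattern — geth's key/value log.Logger is replaced by a named *zap.Logger obtained from logutils, and loose "key", value pairs become typed zap fields. A minimal sketch of the pattern (the function name and error value are illustrative, not part of this diff):

```go
package example

import (
	"errors"

	"go.uber.org/zap"

	"github.com/status-im/status-go/logutils"
)

// logMigration mirrors transactions/pendingtxtracker.go: the component creates
// one named logger up front, then every call site passes typed fields.
func logMigration(count int) {
	logger := logutils.ZapLogger().Named("PendingTxTracker")

	// before: tm.log.Debug("Checking for PT status", "count", len(txs))
	logger.Debug("Checking for PT status", zap.Int("count", count))

	// before: tm.log.Error("failed to compute offset", "error", err)
	logger.Error("failed to compute offset", zap.Error(errors.New("ntp: no response")))
}
```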
diff --git a/vendor/github.com/segmentio/asm/ascii/ascii.go b/vendor/github.com/segmentio/asm/ascii/ascii.go new file mode 100644 index 00000000000..4805146dd6b --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/ascii.go @@ -0,0 +1,53 @@ +package ascii + +import _ "github.com/segmentio/asm/cpu" + +// https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord +const ( + hasLessConstL64 = (^uint64(0)) / 255 + hasLessConstR64 = hasLessConstL64 * 128 + + hasLessConstL32 = (^uint32(0)) / 255 + hasLessConstR32 = hasLessConstL32 * 128 + + hasMoreConstL64 = (^uint64(0)) / 255 + hasMoreConstR64 = hasMoreConstL64 * 128 + + hasMoreConstL32 = (^uint32(0)) / 255 + hasMoreConstR32 = hasMoreConstL32 * 128 +) + +func hasLess64(x, n uint64) bool { + return ((x - (hasLessConstL64 * n)) & ^x & hasLessConstR64) != 0 +} + +func hasLess32(x, n uint32) bool { + return ((x - (hasLessConstL32 * n)) & ^x & hasLessConstR32) != 0 +} + +func hasMore64(x, n uint64) bool { + return (((x + (hasMoreConstL64 * (127 - n))) | x) & hasMoreConstR64) != 0 +} + +func hasMore32(x, n uint32) bool { + return (((x + (hasMoreConstL32 * (127 - n))) | x) & hasMoreConstR32) != 0 +} + +var lowerCase = [256]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +} diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold.go b/vendor/github.com/segmentio/asm/ascii/equal_fold.go new file mode 100644 index 00000000000..d90d8cafccf --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold.go @@ -0,0 +1,30 @@ +package ascii + +import ( + "github.com/segmentio/asm/internal/unsafebytes" +) + +// EqualFold is a version of bytes.EqualFold designed to work on ASCII input +// instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. 
+func EqualFold(a, b []byte) bool { + return EqualFoldString(unsafebytes.String(a), unsafebytes.String(b)) +} + +func HasPrefixFold(s, prefix []byte) bool { + return len(s) >= len(prefix) && EqualFold(s[:len(prefix)], prefix) +} + +func HasSuffixFold(s, suffix []byte) bool { + return len(s) >= len(suffix) && EqualFold(s[len(s)-len(suffix):], suffix) +} + +func HasPrefixFoldString(s, prefix string) bool { + return len(s) >= len(prefix) && EqualFoldString(s[:len(prefix)], prefix) +} + +func HasSuffixFoldString(s, suffix string) bool { + return len(s) >= len(suffix) && EqualFoldString(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go new file mode 100644 index 00000000000..07cf6cdb486 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go @@ -0,0 +1,13 @@ +// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// EqualFoldString is a version of strings.EqualFold designed to work on ASCII +// input instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. +func EqualFoldString(a string, b string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s new file mode 100644 index 00000000000..34495a622b4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s @@ -0,0 +1,304 @@ +// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func EqualFoldString(a string, b string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT ·EqualFoldString(SB), NOSPLIT, $0-33 + MOVQ a_base+0(FP), CX + MOVQ a_len+8(FP), DX + MOVQ b_base+16(FP), BX + CMPQ DX, b_len+24(FP) + JNE done + XORQ AX, AX + CMPQ DX, $0x10 + JB init_x86 + BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB) + JCS init_avx + +init_x86: + LEAQ github·com∕segmentio∕asm∕ascii·lowerCase+0(SB), R9 + XORL SI, SI + +cmp8: + CMPQ DX, $0x08 + JB cmp7 + MOVBLZX (CX)(AX*1), DI + MOVBLZX (BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 1(CX)(AX*1), DI + MOVBLZX 1(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 2(CX)(AX*1), DI + MOVBLZX 2(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 3(CX)(AX*1), DI + MOVBLZX 3(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 4(CX)(AX*1), DI + MOVBLZX 4(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 5(CX)(AX*1), DI + MOVBLZX 5(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 6(CX)(AX*1), DI + MOVBLZX 6(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 7(CX)(AX*1), DI + MOVBLZX 7(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + JNE done + ADDQ $0x08, AX + SUBQ $0x08, DX + JMP cmp8 + +cmp7: + CMPQ DX, $0x07 + JB cmp6 + MOVBLZX 6(CX)(AX*1), DI + MOVBLZX 6(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp6: + CMPQ DX, $0x06 + JB cmp5 + MOVBLZX 5(CX)(AX*1), DI + MOVBLZX 5(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp5: + CMPQ DX, $0x05 + JB cmp4 + MOVBLZX 4(CX)(AX*1), DI + MOVBLZX 4(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp4: + CMPQ DX, $0x04 + JB cmp3 + MOVBLZX 3(CX)(AX*1), DI + MOVBLZX 3(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp3: + CMPQ DX, $0x03 + JB cmp2 + MOVBLZX 2(CX)(AX*1), DI + MOVBLZX 2(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp2: + CMPQ DX, $0x02 + JB cmp1 + MOVBLZX 1(CX)(AX*1), DI + MOVBLZX 1(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp1: + CMPQ DX, $0x01 + JB success + MOVBLZX (CX)(AX*1), DI + MOVBLZX (BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +done: + SETEQ ret+32(FP) + RET + +success: + MOVB $0x01, ret+32(FP) + RET + +init_avx: + MOVB $0x20, SI + PINSRB $0x00, SI, X12 + VPBROADCASTB X12, Y12 + MOVB $0x1f, SI + PINSRB $0x00, SI, X13 + VPBROADCASTB X13, Y13 + MOVB $0x9a, SI + PINSRB $0x00, SI, X14 + VPBROADCASTB X14, Y14 + MOVB $0x01, SI + PINSRB $0x00, SI, X15 + VPBROADCASTB X15, Y15 + +cmp128: + CMPQ DX, $0x80 + JB cmp64 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU 32(CX)(AX*1), Y1 + VMOVDQU 64(CX)(AX*1), Y2 + VMOVDQU 96(CX)(AX*1), Y3 + VMOVDQU (BX)(AX*1), Y4 + VMOVDQU 32(BX)(AX*1), Y5 + VMOVDQU 64(BX)(AX*1), Y6 + VMOVDQU 96(BX)(AX*1), Y7 + VXORPD Y0, Y4, Y4 + VPCMPEQB Y12, Y4, Y8 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y8, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y4, Y0, Y0 + VXORPD Y1, Y5, Y5 + VPCMPEQB Y12, Y5, Y9 + VORPD Y12, Y1, Y1 + VPADDB Y13, Y1, Y1 + VPCMPGTB Y1, Y14, Y1 + VPAND Y9, Y1, Y1 + VPAND Y15, Y1, Y1 + VPSLLW $0x05, Y1, Y1 + VPCMPEQB Y5, Y1, Y1 + VXORPD Y2, Y6, Y6 + VPCMPEQB Y12, Y6, Y10 + VORPD Y12, Y2, Y2 + VPADDB Y13,
Y2, Y2 + VPCMPGTB Y2, Y14, Y2 + VPAND Y10, Y2, Y2 + VPAND Y15, Y2, Y2 + VPSLLW $0x05, Y2, Y2 + VPCMPEQB Y6, Y2, Y2 + VXORPD Y3, Y7, Y7 + VPCMPEQB Y12, Y7, Y11 + VORPD Y12, Y3, Y3 + VPADDB Y13, Y3, Y3 + VPCMPGTB Y3, Y14, Y3 + VPAND Y11, Y3, Y3 + VPAND Y15, Y3, Y3 + VPSLLW $0x05, Y3, Y3 + VPCMPEQB Y7, Y3, Y3 + VPAND Y1, Y0, Y0 + VPAND Y3, Y2, Y2 + VPAND Y2, Y0, Y0 + ADDQ $0x80, AX + SUBQ $0x80, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + JMP cmp128 + +cmp64: + CMPQ DX, $0x40 + JB cmp32 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU 32(CX)(AX*1), Y1 + VMOVDQU (BX)(AX*1), Y2 + VMOVDQU 32(BX)(AX*1), Y3 + VXORPD Y0, Y2, Y2 + VPCMPEQB Y12, Y2, Y4 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y4, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y2, Y0, Y0 + VXORPD Y1, Y3, Y3 + VPCMPEQB Y12, Y3, Y5 + VORPD Y12, Y1, Y1 + VPADDB Y13, Y1, Y1 + VPCMPGTB Y1, Y14, Y1 + VPAND Y5, Y1, Y1 + VPAND Y15, Y1, Y1 + VPSLLW $0x05, Y1, Y1 + VPCMPEQB Y3, Y1, Y1 + VPAND Y1, Y0, Y0 + ADDQ $0x40, AX + SUBQ $0x40, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + +cmp32: + CMPQ DX, $0x20 + JB cmp16 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU (BX)(AX*1), Y1 + VXORPD Y0, Y1, Y1 + VPCMPEQB Y12, Y1, Y2 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y2, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y1, Y0, Y0 + ADDQ $0x20, AX + SUBQ $0x20, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + +cmp16: + CMPQ DX, $0x10 + JLE cmp_tail + VMOVDQU (CX)(AX*1), X0 + VMOVDQU (BX)(AX*1), X1 + VXORPD X0, X1, X1 + VPCMPEQB X12, X1, X2 + VORPD X12, X0, X0 + VPADDB X13, X0, X0 + VPCMPGTB X0, X14, X0 + VPAND X2, X0, X0 + VPAND X15, X0, X0 + VPSLLW $0x05, X0, X0 + VPCMPEQB X1, X0, X0 + ADDQ $0x10, AX + SUBQ $0x10, DX + VPMOVMSKB X0, SI + XORL $0x0000ffff, SI + JNE done + +cmp_tail: + SUBQ $0x10, DX + ADDQ DX, AX + VMOVDQU (CX)(AX*1), X0 + VMOVDQU (BX)(AX*1), X1 + VXORPD X0, X1, X1 + VPCMPEQB X12, X1, X2 + VORPD X12, X0, X0 + VPADDB X13, X0, X0 + VPCMPGTB X0, X14, X0 + VPAND X2, X0, X0 + VPAND X15, X0, X0 + VPSLLW $0x05, X0, X0 + VPCMPEQB X1, X0, X0 + VPMOVMSKB X0, AX + XORL $0x0000ffff, AX + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go b/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go new file mode 100644 index 00000000000..1ae5a13a5b8 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go @@ -0,0 +1,60 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +// EqualFoldString is a version of strings.EqualFold designed to work on ASCII +// input instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. 
+func EqualFoldString(a, b string) bool { + if len(a) != len(b) { + return false + } + + var cmp byte + + for len(a) >= 8 { + cmp |= lowerCase[a[0]] ^ lowerCase[b[0]] + cmp |= lowerCase[a[1]] ^ lowerCase[b[1]] + cmp |= lowerCase[a[2]] ^ lowerCase[b[2]] + cmp |= lowerCase[a[3]] ^ lowerCase[b[3]] + cmp |= lowerCase[a[4]] ^ lowerCase[b[4]] + cmp |= lowerCase[a[5]] ^ lowerCase[b[5]] + cmp |= lowerCase[a[6]] ^ lowerCase[b[6]] + cmp |= lowerCase[a[7]] ^ lowerCase[b[7]] + + if cmp != 0 { + return false + } + + a = a[8:] + b = b[8:] + } + + switch len(a) { + case 7: + cmp |= lowerCase[a[6]] ^ lowerCase[b[6]] + fallthrough + case 6: + cmp |= lowerCase[a[5]] ^ lowerCase[b[5]] + fallthrough + case 5: + cmp |= lowerCase[a[4]] ^ lowerCase[b[4]] + fallthrough + case 4: + cmp |= lowerCase[a[3]] ^ lowerCase[b[3]] + fallthrough + case 3: + cmp |= lowerCase[a[2]] ^ lowerCase[b[2]] + fallthrough + case 2: + cmp |= lowerCase[a[1]] ^ lowerCase[b[1]] + fallthrough + case 1: + cmp |= lowerCase[a[0]] ^ lowerCase[b[0]] + } + + return cmp == 0 +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid.go b/vendor/github.com/segmentio/asm/ascii/valid.go new file mode 100644 index 00000000000..a5168ef5951 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid.go @@ -0,0 +1,18 @@ +package ascii + +import "github.com/segmentio/asm/internal/unsafebytes" + +// Valid returns true if b contains only ASCII characters. +func Valid(b []byte) bool { + return ValidString(unsafebytes.String(b)) +} + +// ValidBytes returns true if b is an ASCII character. +func ValidByte(b byte) bool { + return b <= 0x7f +} + +// ValidBytes returns true if b is an ASCII character. +func ValidRune(r rune) bool { + return r <= 0x7f +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_amd64.go b/vendor/github.com/segmentio/asm/ascii/valid_amd64.go new file mode 100644 index 00000000000..72dc7b435de --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_amd64.go @@ -0,0 +1,9 @@ +// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// ValidString returns true if s contains only ASCII characters. +func ValidString(s string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/valid_amd64.s b/vendor/github.com/segmentio/asm/ascii/valid_amd64.s new file mode 100644 index 00000000000..0214b0ce9d0 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_amd64.s @@ -0,0 +1,132 @@ +// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func ValidString(s string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT Ā·ValidString(SB), NOSPLIT, $0-17 + MOVQ s_base+0(FP), AX + MOVQ s_len+8(FP), CX + MOVQ $0x8080808080808080, DX + CMPQ CX, $0x10 + JB cmp8 + BTL $0x08, githubĀ·comāˆ•segmentioāˆ•asmāˆ•cpuĀ·X86+0(SB) + JCS init_avx + +cmp8: + CMPQ CX, $0x08 + JB cmp4 + TESTQ DX, (AX) + JNZ invalid + ADDQ $0x08, AX + SUBQ $0x08, CX + JMP cmp8 + +cmp4: + CMPQ CX, $0x04 + JB cmp3 + TESTL $0x80808080, (AX) + JNZ invalid + ADDQ $0x04, AX + SUBQ $0x04, CX + +cmp3: + CMPQ CX, $0x03 + JB cmp2 + MOVWLZX (AX), CX + MOVBLZX 2(AX), AX + SHLL $0x10, AX + ORL CX, AX + TESTL $0x80808080, AX + JMP done + +cmp2: + CMPQ CX, $0x02 + JB cmp1 + TESTW $0x8080, (AX) + JMP done + +cmp1: + CMPQ CX, $0x00 + JE done + TESTB $0x80, (AX) + +done: + SETEQ ret+16(FP) + RET + +invalid: + MOVB $0x00, ret+16(FP) + RET + +init_avx: + PINSRQ $0x00, DX, X4 + VPBROADCASTQ X4, Y4 + +cmp256: + CMPQ CX, $0x00000100 + JB cmp128 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VMOVDQU 64(AX), Y1 + VPOR 96(AX), Y1, Y1 + VMOVDQU 128(AX), Y2 + VPOR 160(AX), Y2, Y2 + VMOVDQU 192(AX), Y3 + VPOR 224(AX), Y3, Y3 + VPOR Y1, Y0, Y0 + VPOR Y3, Y2, Y2 + VPOR Y2, Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x00000100, AX + SUBQ $0x00000100, CX + JMP cmp256 + +cmp128: + CMPQ CX, $0x80 + JB cmp64 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VMOVDQU 64(AX), Y1 + VPOR 96(AX), Y1, Y1 + VPOR Y1, Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x80, AX + SUBQ $0x80, CX + +cmp64: + CMPQ CX, $0x40 + JB cmp32 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x40, AX + SUBQ $0x40, CX + +cmp32: + CMPQ CX, $0x20 + JB cmp16 + VPTEST (AX), Y4 + JNZ invalid + ADDQ $0x20, AX + SUBQ $0x20, CX + +cmp16: + CMPQ CX, $0x10 + JLE cmp_tail + VPTEST (AX), X4 + JNZ invalid + ADDQ $0x10, AX + SUBQ $0x10, CX + +cmp_tail: + SUBQ $0x10, CX + ADDQ CX, AX + VPTEST (AX), X4 + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/valid_default.go b/vendor/github.com/segmentio/asm/ascii/valid_default.go new file mode 100644 index 00000000000..715a090d495 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_default.go @@ -0,0 +1,48 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +import ( + "unsafe" +) + +// ValidString returns true if s contains only ASCII characters. +func ValidString(s string) bool { + p := *(*unsafe.Pointer)(unsafe.Pointer(&s)) + i := uintptr(0) + n := uintptr(len(s)) + + for i+8 <= n { + if (*(*uint64)(unsafe.Pointer(uintptr(p) + i)) & 0x8080808080808080) != 0 { + return false + } + i += 8 + } + + if i+4 <= n { + if (*(*uint32)(unsafe.Pointer(uintptr(p) + i)) & 0x80808080) != 0 { + return false + } + i += 4 + } + + if i == n { + return true + } + + p = unsafe.Pointer(uintptr(p) + i) + + var x uint32 + switch n - i { + case 3: + x = uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16 + case 2: + x = uint32(*(*uint16)(p)) + case 1: + x = uint32(*(*uint8)(p)) + default: + return true + } + return (x & 0x80808080) == 0 +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print.go b/vendor/github.com/segmentio/asm/ascii/valid_print.go new file mode 100644 index 00000000000..aa0db7f6551 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print.go @@ -0,0 +1,18 @@ +package ascii + +import "github.com/segmentio/asm/internal/unsafebytes" + +// ValidPrint returns true if b contains only printable ASCII characters. 
+func ValidPrint(b []byte) bool { + return ValidPrintString(unsafebytes.String(b)) +} + +// ValidPrintBytes returns true if b is an ASCII character. +func ValidPrintByte(b byte) bool { + return 0x20 <= b && b <= 0x7e +} + +// ValidPrintBytes returns true if b is an ASCII character. +func ValidPrintRune(r rune) bool { + return 0x20 <= r && r <= 0x7e +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go new file mode 100644 index 00000000000..b14626666d4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go @@ -0,0 +1,9 @@ +// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// ValidPrintString returns true if s contains only printable ASCII characters. +func ValidPrintString(s string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s new file mode 100644 index 00000000000..bc2e20a23b6 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s @@ -0,0 +1,185 @@ +// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func ValidPrintString(s string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT Ā·ValidPrintString(SB), NOSPLIT, $0-17 + MOVQ s_base+0(FP), AX + MOVQ s_len+8(FP), CX + CMPQ CX, $0x10 + JB init_x86 + BTL $0x08, githubĀ·comāˆ•segmentioāˆ•asmāˆ•cpuĀ·X86+0(SB) + JCS init_avx + +init_x86: + CMPQ CX, $0x08 + JB cmp4 + MOVQ $0xdfdfdfdfdfdfdfe0, DX + MOVQ $0x0101010101010101, BX + MOVQ $0x8080808080808080, SI + +cmp8: + MOVQ (AX), DI + MOVQ DI, R8 + LEAQ (DI)(DX*1), R9 + NOTQ R8 + ANDQ R8, R9 + LEAQ (DI)(BX*1), R8 + ORQ R8, DI + ORQ R9, DI + ADDQ $0x08, AX + SUBQ $0x08, CX + TESTQ SI, DI + JNE done + CMPQ CX, $0x08 + JB cmp4 + JMP cmp8 + +cmp4: + CMPQ CX, $0x04 + JB cmp3 + MOVL (AX), DX + MOVL DX, BX + LEAL 3755991008(DX), SI + NOTL BX + ANDL BX, SI + LEAL 16843009(DX), BX + ORL BX, DX + ORL SI, DX + ADDQ $0x04, AX + SUBQ $0x04, CX + TESTL $0x80808080, DX + JNE done + +cmp3: + CMPQ CX, $0x03 + JB cmp2 + MOVWLZX (AX), DX + MOVBLZX 2(AX), AX + SHLL $0x10, AX + ORL DX, AX + ORL $0x20000000, AX + JMP final + +cmp2: + CMPQ CX, $0x02 + JB cmp1 + MOVWLZX (AX), AX + ORL $0x20200000, AX + JMP final + +cmp1: + CMPQ CX, $0x00 + JE done + MOVBLZX (AX), AX + ORL $0x20202000, AX + +final: + MOVL AX, CX + LEAL 3755991008(AX), DX + NOTL CX + ANDL CX, DX + LEAL 16843009(AX), CX + ORL CX, AX + ORL DX, AX + TESTL $0x80808080, AX + +done: + SETEQ ret+16(FP) + RET + +init_avx: + MOVB $0x1f, DL + PINSRB $0x00, DX, X8 + VPBROADCASTB X8, Y8 + MOVB $0x7e, DL + PINSRB $0x00, DX, X9 + VPBROADCASTB X9, Y9 + +cmp128: + CMPQ CX, $0x80 + JB cmp64 + VMOVDQU (AX), Y0 + VMOVDQU 32(AX), Y1 + VMOVDQU 64(AX), Y2 + VMOVDQU 96(AX), Y3 + VPCMPGTB Y8, Y0, Y4 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y4, Y0, Y0 + VPCMPGTB Y8, Y1, Y5 + VPCMPGTB Y9, Y1, Y1 + VPANDN Y5, Y1, Y1 + VPCMPGTB Y8, Y2, Y6 + VPCMPGTB Y9, Y2, Y2 + VPANDN Y6, Y2, Y2 + VPCMPGTB Y8, Y3, Y7 + VPCMPGTB Y9, Y3, Y3 + VPANDN Y7, Y3, Y3 + VPAND Y1, Y0, Y0 + VPAND Y3, Y2, Y2 + VPAND Y2, Y0, Y0 + ADDQ $0x80, AX + SUBQ $0x80, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + JMP cmp128 + +cmp64: + CMPQ CX, $0x40 + JB cmp32 + VMOVDQU (AX), Y0 + 
VMOVDQU 32(AX), Y1 + VPCMPGTB Y8, Y0, Y2 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y2, Y0, Y0 + VPCMPGTB Y8, Y1, Y3 + VPCMPGTB Y9, Y1, Y1 + VPANDN Y3, Y1, Y1 + VPAND Y1, Y0, Y0 + ADDQ $0x40, AX + SUBQ $0x40, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + +cmp32: + CMPQ CX, $0x20 + JB cmp16 + VMOVDQU (AX), Y0 + VPCMPGTB Y8, Y0, Y1 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y1, Y0, Y0 + ADDQ $0x20, AX + SUBQ $0x20, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + +cmp16: + CMPQ CX, $0x10 + JLE cmp_tail + VMOVDQU (AX), X0 + VPCMPGTB X8, X0, X1 + VPCMPGTB X9, X0, X0 + VPANDN X1, X0, X0 + ADDQ $0x10, AX + SUBQ $0x10, CX + VPMOVMSKB X0, DX + XORL $0x0000ffff, DX + JNE done + +cmp_tail: + SUBQ $0x10, CX + ADDQ CX, AX + VMOVDQU (AX), X0 + VPCMPGTB X8, X0, X1 + VPCMPGTB X9, X0, X0 + VPANDN X1, X0, X0 + VPMOVMSKB X0, DX + XORL $0x0000ffff, DX + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_default.go b/vendor/github.com/segmentio/asm/ascii/valid_print_default.go new file mode 100644 index 00000000000..c4dc748b083 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_default.go @@ -0,0 +1,46 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +import "unsafe" + +// ValidString returns true if s contains only printable ASCII characters. +func ValidPrintString(s string) bool { + p := *(*unsafe.Pointer)(unsafe.Pointer(&s)) + i := uintptr(0) + n := uintptr(len(s)) + + for i+8 <= n { + if hasLess64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x7e) { + return false + } + i += 8 + } + + if i+4 <= n { + if hasLess32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x7e) { + return false + } + i += 4 + } + + if i == n { + return true + } + + p = unsafe.Pointer(uintptr(p) + i) + + var x uint32 + switch n - i { + case 3: + x = 0x20000000 | uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16 + case 2: + x = 0x20200000 | uint32(*(*uint16)(p)) + case 1: + x = 0x20202000 | uint32(*(*uint8)(p)) + default: + return true + } + return !(hasLess32(x, 0x20) || hasMore32(x, 0x7e)) +} diff --git a/vendor/github.com/segmentio/asm/base64/base64.go b/vendor/github.com/segmentio/asm/base64/base64.go new file mode 100644 index 00000000000..dd2128d4a95 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64.go @@ -0,0 +1,67 @@ +package base64 + +import ( + "encoding/base64" +) + +const ( + StdPadding rune = base64.StdPadding + NoPadding rune = base64.NoPadding + + encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+," + + letterRange = int8('Z' - 'A' + 1) +) + +// StdEncoding is the standard base64 encoding, as defined in RFC 4648. +var StdEncoding = NewEncoding(encodeStd) + +// URLEncoding is the alternate base64 encoding defined in RFC 4648. +// It is typically used in URLs and file names. +var URLEncoding = NewEncoding(encodeURL) + +// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2. +// This is the same as StdEncoding but omits padding characters. +var RawStdEncoding = StdEncoding.WithPadding(NoPadding) + +// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648. +// This is the same as URLEncoding but omits padding characters. 
+var RawURLEncoding = URLEncoding.WithPadding(NoPadding)
+
+// NewEncoding returns a new padded Encoding defined by the given alphabet,
+// which must be a 64-byte string that does not contain the padding character
+// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet
+// cannot be arbitrary, and it must follow one of the known standard encoding
+// variants.
+//
+// Required alphabet values:
+// * [0,26): characters 'A'..'Z'
+// * [26,52): characters 'a'..'z'
+// * [52,62): characters '0'..'9'
+// Flexible alphabet value options:
+// * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/'
+// * RFC 4648 URI: '-' and '_'
+// * RFC 3501: '+' and ','
+//
+// The resulting Encoding uses the default padding character ('='), which may
+// be changed or disabled via WithPadding. The padding character is unrestricted,
+// but it must be a character outside of the encoder alphabet.
+func NewEncoding(encoder string) *Encoding {
+	if len(encoder) != 64 {
+		panic("encoding alphabet is not 64-bytes long")
+	}
+
+	if _, ok := allowedEncoding[encoder]; !ok {
+		panic("non-standard encoding alphabets are not supported")
+	}
+
+	return newEncoding(encoder)
+}
+
+var allowedEncoding = map[string]struct{}{
+	encodeStd:  {},
+	encodeURL:  {},
+	encodeIMAP: {},
+}
diff --git a/vendor/github.com/segmentio/asm/base64/base64_amd64.go b/vendor/github.com/segmentio/asm/base64/base64_amd64.go
new file mode 100644
index 00000000000..e4940d781cf
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/base64/base64_amd64.go
@@ -0,0 +1,160 @@
+//go:build amd64 && !purego
+// +build amd64,!purego
+
+package base64
+
+import (
+	"encoding/base64"
+
+	"github.com/segmentio/asm/cpu"
+	"github.com/segmentio/asm/cpu/x86"
+	"github.com/segmentio/asm/internal/unsafebytes"
+)
+
+// An Encoding is a radix 64 encoding/decoding scheme, defined by a
+// 64-character alphabet.
+type Encoding struct {
+	enc    func(dst []byte, src []byte, lut *int8) (int, int)
+	enclut [32]int8
+
+	dec    func(dst []byte, src []byte, lut *int8) (int, int)
+	declut [48]int8
+
+	base *base64.Encoding
+}
+
+const (
+	minEncodeLen = 28
+	minDecodeLen = 45
+)
+
+func newEncoding(encoder string) *Encoding {
+	e := &Encoding{base: base64.NewEncoding(encoder)}
+	if cpu.X86.Has(x86.AVX2) {
+		e.enableEncodeAVX2(encoder)
+		e.enableDecodeAVX2(encoder)
+	}
+	return e
+}
+
+func (e *Encoding) enableEncodeAVX2(encoder string) {
+	// Translate values 0..63 to the Base64 alphabet. There are five sets:
+	//
+	// From      To         Add    Index  Example
+	// [0..25]   [65..90]   +65        0  ABCDEFGHIJKLMNOPQRSTUVWXYZ
+	// [26..51]  [97..122]  +71        1  abcdefghijklmnopqrstuvwxyz
+	// [52..61]  [48..57]    -4  [2..11]  0123456789
+	// [62]      [43]       -19       12  +
+	// [63]      [47]       -16       13  /
+	tab := [32]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange}
+	for i, ch := range encoder[2*letterRange:] {
+		tab[2+i] = int8(ch) - 2*letterRange - int8(i)
+	}
+
+	e.enc = encodeAVX2
+	e.enclut = tab
+}
+
+func (e *Encoding) enableDecodeAVX2(encoder string) {
+	c62, c63 := int8(encoder[62]), int8(encoder[63])
+	url := c63 == '_'
+	if url {
+		c63 = '/'
+	}
+
+	// Translate values from the Base64 alphabet using five sets.
Values outside + // of these ranges are considered invalid: + // + // From To Add Index Example + // [47] [63] +16 1 / + // [43] [62] +19 2 + + // [48..57] [52..61] +4 3 0123456789 + // [65..90] [0..25] -65 4,5 ABCDEFGHIJKLMNOPQRSTUVWXYZ + // [97..122] [26..51] -71 6,7 abcdefghijklmnopqrstuvwxyz + tab := [48]int8{ + 0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B, + } + tab[(c62&15)+16] = 0x1A + tab[(c63&15)+16] = 0x1A + + if url { + e.dec = decodeAVX2URI + } else { + e.dec = decodeAVX2 + } + e.declut = tab +} + +// WithPadding creates a duplicate Encoding updated with a specified padding +// character, or NoPadding to disable padding. The padding character must not +// be contained in the encoding alphabet, must not be '\r' or '\n', and must +// be no greater than '\xFF'. +func (enc Encoding) WithPadding(padding rune) *Encoding { + enc.base = enc.base.WithPadding(padding) + return &enc +} + +// Strict creates a duplicate encoding updated with strict decoding enabled. +// This requires that trailing padding bits are zero. +func (enc Encoding) Strict() *Encoding { + enc.base = enc.base.Strict() + return &enc +} + +// Encode encodes src using the defined encoding alphabet. +// This will write EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) Encode(dst, src []byte) { + if len(src) >= minEncodeLen && enc.enc != nil { + d, s := enc.enc(dst, src, &enc.enclut[0]) + dst = dst[d:] + src = src[s:] + } + enc.base.Encode(dst, src) +} + +// Encode encodes src using the encoding enc, writing +// EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) EncodeToString(src []byte) string { + buf := make([]byte, enc.base.EncodedLen(len(src))) + enc.Encode(buf, src) + return string(buf) +} + +// EncodedLen calculates the base64-encoded byte length for a message +// of length n. +func (enc *Encoding) EncodedLen(n int) int { + return enc.base.EncodedLen(n) +} + +// Decode decodes src using the defined encoding alphabet. +// This will write DecodedLen(len(src)) bytes to dst and return the number of +// bytes written. +func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { + var d, s int + if len(src) >= minDecodeLen && enc.dec != nil { + d, s = enc.dec(dst, src, &enc.declut[0]) + dst = dst[d:] + src = src[s:] + } + n, err = enc.base.Decode(dst, src) + n += d + return +} + +// DecodeString decodes the base64 encoded string s, returns the decoded +// value as bytes. +func (enc *Encoding) DecodeString(s string) ([]byte, error) { + src := unsafebytes.BytesOf(s) + dst := make([]byte, enc.base.DecodedLen(len(s))) + n, err := enc.Decode(dst, src) + return dst[:n], err +} + +// DecodedLen calculates the decoded byte length for a base64-encoded message +// of length n. +func (enc *Encoding) DecodedLen(n int) int { + return enc.base.DecodedLen(n) +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_default.go b/vendor/github.com/segmentio/asm/base64/base64_default.go new file mode 100644 index 00000000000..f5d3d6470a8 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_default.go @@ -0,0 +1,14 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package base64 + +import "encoding/base64" + +// An Encoding is a radix 64 encoding/decoding scheme, defined by a +// 64-character alphabet. 
+type Encoding = base64.Encoding + +func newEncoding(encoder string) *Encoding { + return base64.NewEncoding(encoder) +} diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.go b/vendor/github.com/segmentio/asm/base64/decode_amd64.go new file mode 100644 index 00000000000..1dae5b431b1 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.go @@ -0,0 +1,10 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package base64 + +func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) + +func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.s b/vendor/github.com/segmentio/asm/base64/decode_amd64.s new file mode 100644 index 00000000000..cc6c779dae4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.s @@ -0,0 +1,144 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010 +DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010 +GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140 +GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000 +GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000 +DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000 +GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16 + +DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102 +DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08 +DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405 +DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000 +GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32 + +// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT Ā·decodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X8 + VPBROADCASTB X8, Y8 + XORQ CX, CX + XORQ BX, BX + VPXOR Y7, Y7, Y7 + VPERMQ $0x44, (SI), Y6 + VPERMQ $0x44, 16(SI), Y4 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y5 + +loop: + VMOVDQU (DX)(BX*1), Y0 + VPSRLD $0x04, Y0, Y2 + VPAND Y8, Y0, Y3 + VPSHUFB Y3, Y4, Y3 + VPAND Y8, Y2, Y2 + VPSHUFB Y2, Y5, Y9 + VPTEST Y9, Y3 + JNE done + VPCMPEQB Y8, Y0, Y3 + VPADDB Y3, Y2, Y2 + VPSHUFB Y2, Y6, Y2 + VPADDB Y0, Y2, Y0 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y0, Y0 + VPMADDWD b64_dec_madd2<>+0(SB), Y0, Y0 + VEXTRACTI128 $0x01, Y0, X1 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X1, X1 + VPSHUFB b64_dec_shuf<>+0(SB), Y0, Y0 + VPBLENDD $0x08, Y1, Y0, Y1 + VPBLENDD $0xc0, Y7, Y1, Y1 + VMOVDQU Y1, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ $0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +// func decodeAVX2URI(dst []byte, src []byte, lut *int8) 
(int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT Ā·decodeAVX2URI(SB), NOSPLIT, $0-72 + MOVB $0x2f, AL + PINSRB $0x00, AX, X0 + VPBROADCASTB X0, Y0 + MOVB $0x5f, AL + PINSRB $0x00, AX, X1 + VPBROADCASTB X1, Y1 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X10 + VPBROADCASTB X10, Y10 + XORQ CX, CX + XORQ BX, BX + VPXOR Y9, Y9, Y9 + VPERMQ $0x44, (SI), Y8 + VPERMQ $0x44, 16(SI), Y6 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y7 + +loop: + VMOVDQU (DX)(BX*1), Y2 + VPCMPEQB Y2, Y1, Y4 + VPBLENDVB Y4, Y0, Y2, Y2 + VPSRLD $0x04, Y2, Y4 + VPAND Y10, Y2, Y5 + VPSHUFB Y5, Y6, Y5 + VPAND Y10, Y4, Y4 + VPSHUFB Y4, Y7, Y11 + VPTEST Y11, Y5 + JNE done + VPCMPEQB Y10, Y2, Y5 + VPADDB Y5, Y4, Y4 + VPSHUFB Y4, Y8, Y4 + VPADDB Y2, Y4, Y2 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y2, Y2 + VPMADDWD b64_dec_madd2<>+0(SB), Y2, Y2 + VEXTRACTI128 $0x01, Y2, X3 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X3, X3 + VPSHUFB b64_dec_shuf<>+0(SB), Y2, Y2 + VPBLENDD $0x08, Y3, Y2, Y3 + VPBLENDD $0xc0, Y9, Y3, Y3 + VMOVDQU Y3, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ $0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.go b/vendor/github.com/segmentio/asm/base64/encode_amd64.go new file mode 100644 index 00000000000..c38060f7119 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.go @@ -0,0 +1,8 @@ +// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package base64 + +func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.s b/vendor/github.com/segmentio/asm/base64/encode_amd64.s new file mode 100644 index 00000000000..2edd27aac88 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.s @@ -0,0 +1,88 @@ +// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT Ā·encodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x33, CL + PINSRB $0x00, CX, X4 + VPBROADCASTB X4, Y4 + MOVB $0x19, CL + PINSRB $0x00, CX, X5 + VPBROADCASTB X5, Y5 + XORQ CX, CX + XORQ BX, BX + + // Load the 16-byte LUT into both lanes of the register + VPERMQ $0x44, (SI), Y3 + + // Load the first block using a mask to avoid potential fault + VMOVDQU b64_enc_load<>+0(SB), Y0 + VPMASKMOVD -4(DX)(BX*1), Y0, Y0 + +loop: + VPSHUFB b64_enc_shuf<>+0(SB), Y0, Y0 + VPAND b64_enc_mask1<>+0(SB), Y0, Y1 + VPSLLW $0x08, Y1, Y2 + VPSLLW $0x04, Y1, Y1 + VPBLENDW $0xaa, Y2, Y1, Y2 + VPAND b64_enc_mask2<>+0(SB), Y0, Y1 + VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0 + VPOR Y0, Y2, Y0 + VPSUBUSB Y4, Y0, Y1 + VPCMPGTB Y5, Y0, Y2 + VPSUBB Y2, Y1, Y1 + VPSHUFB Y1, Y3, Y1 + VPADDB Y0, Y1, Y0 + VMOVDQU Y0, (AX)(CX*1) + ADDQ $0x20, CX + ADDQ $0x18, BX + SUBQ $0x18, DI + CMPQ DI, $0x20 + JB done + VMOVDQU -4(DX)(BX*1), Y0 + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000 +DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000 +DATA b64_enc_load<>+16(SB)/8, $0x8000000080000000 +DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000 +GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405 +DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b +DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001 +DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607 +GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0 +GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00 +GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+24(SB)/8, $0x0400004004000040 +GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/cpu/arm/arm.go b/vendor/github.com/segmentio/asm/cpu/arm/arm.go new file mode 100644 index 00000000000..47c695a075f --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm/arm.go @@ -0,0 +1,80 @@ +package arm + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SWP Feature = 1 << iota // SWP instruction support + HALF // Half-word load and store support + THUMB // ARM Thumb instruction set + BIT26 // Address space limited to 26-bits + FASTMUL // 32-bit operand, 64-bit result multiplication support + FPA // Floating point arithmetic support + VFP // Vector floating point support + EDSP // DSP Extensions support + JAVA // Java instruction set + IWMMXT // Intel Wireless MMX technology support + CRUNCH // MaverickCrunch context switching and handling + THUMBEE // Thumb EE instruction set + NEON // NEON instruction set + VFPv3 // Vector floating point version 3 support + VFPv3D16 // Vector floating point version 3 D8-D15 + TLS // Thread local storage support + VFPv4 // Vector floating point version 4 support + IDIVA // Integer divide instruction support in ARM mode + IDIVT // Integer divide instruction support in Thumb mode + VFPD32 // Vector floating point version 3 D15-D31 + LPAE // Large Physical Address Extensions + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SWP, ARM.HasSWP) + cpu.set(HALF, ARM.HasHALF) + cpu.set(THUMB, ARM.HasTHUMB) + cpu.set(BIT26, ARM.Has26BIT) + cpu.set(FASTMUL, ARM.HasFASTMUL) + cpu.set(FPA, ARM.HasFPA) + cpu.set(VFP, ARM.HasVFP) + cpu.set(EDSP, ARM.HasEDSP) + cpu.set(JAVA, ARM.HasJAVA) + cpu.set(IWMMXT, ARM.HasIWMMXT) + cpu.set(CRUNCH, ARM.HasCRUNCH) + cpu.set(THUMBEE, ARM.HasTHUMBEE) + cpu.set(NEON, ARM.HasNEON) + cpu.set(VFPv3, ARM.HasVFPv3) + cpu.set(VFPv3D16, ARM.HasVFPv3D16) + cpu.set(TLS, ARM.HasTLS) + cpu.set(VFPv4, ARM.HasVFPv4) + cpu.set(IDIVA, ARM.HasIDIVA) + cpu.set(IDIVT, ARM.HasIDIVT) + cpu.set(VFPD32, ARM.HasVFPD32) + cpu.set(LPAE, ARM.HasLPAE) + cpu.set(EVTSTRM, ARM.HasEVTSTRM) + cpu.set(AES, ARM.HasAES) + cpu.set(PMULL, ARM.HasPMULL) + cpu.set(SHA1, ARM.HasSHA1) + cpu.set(SHA2, ARM.HasSHA2) + cpu.set(CRC32, ARM.HasCRC32) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go new file mode 100644 index 00000000000..0c5134c76ee --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go @@ -0,0 +1,74 @@ +package arm64 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + FP Feature = 1 << iota // Floating-point instruction set (always available) + ASIMD // Advanced SIMD (always available) + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation + ATOMICS // Atomic memory operation instruction set + FPHP // Half precision floating-point instruction set + ASIMDHP // Advanced SIMD half precision instruction set + CPUID // CPUID identification scheme registers + ASIMDRDM // Rounding double multiply add/subtract instruction set + JSCVT // Javascript conversion from floating-point to integer + FCMA // Floating-point multiplication and addition of complex numbers + LRCPC // Release Consistent processor consistent support + DCPOP // Persistent memory support + SHA3 // SHA3 hardware implementation + SM3 // SM3 hardware implementation + SM4 // SM4 hardware implementation + ASIMDDP // Advanced SIMD double precision instruction set + SHA512 // SHA512 hardware implementation + SVE // Scalable Vector Extensions + ASIMDFHM // Advanced SIMD multiplication FP16 to FP32 +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(FP, ARM64.HasFP) + cpu.set(ASIMD, ARM64.HasASIMD) + cpu.set(EVTSTRM, ARM64.HasEVTSTRM) + cpu.set(AES, ARM64.HasAES) + cpu.set(PMULL, ARM64.HasPMULL) + cpu.set(SHA1, ARM64.HasSHA1) + cpu.set(SHA2, ARM64.HasSHA2) + cpu.set(CRC32, ARM64.HasCRC32) + cpu.set(ATOMICS, ARM64.HasATOMICS) + cpu.set(FPHP, ARM64.HasFPHP) + cpu.set(ASIMDHP, ARM64.HasASIMDHP) + cpu.set(CPUID, ARM64.HasCPUID) + cpu.set(ASIMDRDM, ARM64.HasASIMDRDM) + cpu.set(JSCVT, ARM64.HasJSCVT) + cpu.set(FCMA, ARM64.HasFCMA) + cpu.set(LRCPC, ARM64.HasLRCPC) + cpu.set(DCPOP, ARM64.HasDCPOP) + cpu.set(SHA3, ARM64.HasSHA3) + cpu.set(SM3, ARM64.HasSM3) + cpu.set(SM4, ARM64.HasSM4) + cpu.set(ASIMDDP, ARM64.HasASIMDDP) + cpu.set(SHA512, ARM64.HasSHA512) + cpu.set(SVE, ARM64.HasSVE) + cpu.set(ASIMDFHM, ARM64.HasASIMDFHM) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/cpu.go b/vendor/github.com/segmentio/asm/cpu/cpu.go new file mode 100644 index 00000000000..6ddf4973f55 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpu.go @@ -0,0 +1,22 @@ +// Pakage cpu provides APIs to detect CPU features available at runtime. +package cpu + +import ( + "github.com/segmentio/asm/cpu/arm" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +var ( + // X86 is the bitset representing the set of the x86 instruction sets are + // supported by the CPU. + X86 = x86.ABI() + + // ARM is the bitset representing which parts of the arm instruction sets + // are supported by the CPU. + ARM = arm.ABI() + + // ARM64 is the bitset representing which parts of the arm64 instruction + // sets are supported by the CPU. + ARM64 = arm64.ABI() +) diff --git a/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go new file mode 100644 index 00000000000..0949d3d584d --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go @@ -0,0 +1,32 @@ +// Package cpuid provides generic types used to represent CPU features supported +// by the architecture. 
+package cpuid
+
+// CPU is a bitset of feature flags representing the capabilities of various CPU
+// architectures that this package provides optimized assembly routines for.
+//
+// The intent is to provide a stable ABI between the Go code that generates the
+// assembly, and the program that uses the library functions.
+type CPU uint64
+
+// Feature represents a single CPU feature.
+type Feature uint64
+
+const (
+	// None is a Feature value that has no CPU features enabled.
+	None Feature = 0
+	// All is a Feature value that has all CPU features enabled.
+	All Feature = 0xFFFFFFFFFFFFFFFF
+)
+
+func (cpu CPU) Has(feature Feature) bool {
+	return (Feature(cpu) & feature) == feature
+}
+
+func (cpu *CPU) Set(feature Feature, enabled bool) {
+	if enabled {
+		*cpu |= CPU(feature)
+	} else {
+		*cpu &= ^CPU(feature)
+	}
+}
diff --git a/vendor/github.com/segmentio/asm/cpu/x86/x86.go b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
new file mode 100644
index 00000000000..9e93537583d
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
@@ -0,0 +1,76 @@
+package x86
+
+import (
+	"github.com/segmentio/asm/cpu/cpuid"
+	. "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+	return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+	(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+	SSE                Feature = 1 << iota // SSE functions
+	SSE2                                   // P4 SSE functions
+	SSE3                                   // Prescott SSE3 functions
+	SSE41                                  // Penryn SSE4.1 functions
+	SSE42                                  // Nehalem SSE4.2 functions
+	SSE4A                                  // AMD Barcelona microarchitecture SSE4a instructions
+	SSSE3                                  // Conroe SSSE3 functions
+	AVX                                    // AVX functions
+	AVX2                                   // AVX2 functions
+	AVX512BF16                             // AVX-512 BFLOAT16 Instructions
+	AVX512BITALG                           // AVX-512 Bit Algorithms
+	AVX512BW                               // AVX-512 Byte and Word Instructions
+	AVX512CD                               // AVX-512 Conflict Detection Instructions
+	AVX512DQ                               // AVX-512 Doubleword and Quadword Instructions
+	AVX512ER                               // AVX-512 Exponential and Reciprocal Instructions
+	AVX512F                                // AVX-512 Foundation
+	AVX512IFMA                             // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF                               // AVX-512 Prefetch Instructions
+	AVX512VBMI                             // AVX-512 Vector Bit Manipulation Instructions
+	AVX512VBMI2                            // AVX-512 Vector Bit Manipulation Instructions, Version 2
+	AVX512VL                               // AVX-512 Vector Length Extensions
+	AVX512VNNI                             // AVX-512 Vector Neural Network Instructions
+	AVX512VP2INTERSECT                     // AVX-512 Intersect for D/Q
+	AVX512VPOPCNTDQ                        // AVX-512 Vector Population Count Doubleword and Quadword
+	CMOV                                   // Conditional move
+)
+
+func ABI() CPU {
+	cpu := CPU(0)
+	cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SSE?
+	cpu.set(SSE2, X86.HasSSE2)
+	cpu.set(SSE3, X86.HasSSE3)
+	cpu.set(SSE41, X86.HasSSE41)
+	cpu.set(SSE42, X86.HasSSE42)
+	cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu?
+ cpu.set(SSSE3, X86.HasSSSE3) + cpu.set(AVX, X86.HasAVX) + cpu.set(AVX2, X86.HasAVX2) + cpu.set(AVX512BF16, X86.HasAVX512BF16) + cpu.set(AVX512BITALG, X86.HasAVX512BITALG) + cpu.set(AVX512BW, X86.HasAVX512BW) + cpu.set(AVX512CD, X86.HasAVX512CD) + cpu.set(AVX512DQ, X86.HasAVX512DQ) + cpu.set(AVX512ER, X86.HasAVX512ER) + cpu.set(AVX512F, X86.HasAVX512F) + cpu.set(AVX512IFMA, X86.HasAVX512IFMA) + cpu.set(AVX512PF, X86.HasAVX512PF) + cpu.set(AVX512VBMI, X86.HasAVX512VBMI) + cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2) + cpu.set(AVX512VL, X86.HasAVX512VL) + cpu.set(AVX512VNNI, X86.HasAVX512VNNI) + cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu? + cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ) + cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV? + return cpu +} diff --git a/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go new file mode 100644 index 00000000000..913c9cc68b0 --- /dev/null +++ b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go @@ -0,0 +1,20 @@ +package unsafebytes + +import "unsafe" + +func Pointer(b []byte) *byte { + return *(**byte)(unsafe.Pointer(&b)) +} + +func String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func BytesOf(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)})) +} + +type sliceHeader struct { + str string + cap int +} diff --git a/vendor/github.com/segmentio/asm/keyset/keyset.go b/vendor/github.com/segmentio/asm/keyset/keyset.go new file mode 100644 index 00000000000..1943c5f7894 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset.go @@ -0,0 +1,40 @@ +package keyset + +import ( + "bytes" + + "github.com/segmentio/asm/cpu" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +// New prepares a set of keys for use with Lookup. +// +// An optimized routine is used if the processor supports AVX instructions and +// the maximum length of any of the keys is less than or equal to 16. If New +// returns nil, this indicates that an optimized routine is not available, and +// the caller should use a fallback. +func New(keys [][]byte) []byte { + maxWidth, hasNullByte := checkKeys(keys) + if hasNullByte || maxWidth > 16 || !(cpu.X86.Has(x86.AVX) || cpu.ARM64.Has(arm64.ASIMD)) { + return nil + } + + set := make([]byte, len(keys)*16) + for i, k := range keys { + copy(set[i*16:], k) + } + return set +} + +func checkKeys(keys [][]byte) (maxWidth int, hasNullByte bool) { + for _, k := range keys { + if len(k) > maxWidth { + maxWidth = len(k) + } + if bytes.IndexByte(k, 0) >= 0 { + hasNullByte = true + } + } + return +} diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go new file mode 100644 index 00000000000..9554ee67cac --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go @@ -0,0 +1,10 @@ +// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package keyset + +// Lookup searches for a key in a set of keys, returning its index if +// found. If the key cannot be found, the number of keys is returned. 
+func Lookup(keyset []byte, key []byte) int diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s new file mode 100644 index 00000000000..e27d2c45eae --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s @@ -0,0 +1,108 @@ +// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func Lookup(keyset []byte, key []byte) int +// Requires: AVX +TEXT Ā·Lookup(SB), NOSPLIT, $0-56 + MOVQ keyset_base+0(FP), AX + MOVQ keyset_len+8(FP), CX + SHRQ $0x04, CX + MOVQ key_base+24(FP), DX + MOVQ key_len+32(FP), BX + MOVQ key_cap+40(FP), SI + CMPQ BX, $0x10 + JA not_found + CMPQ SI, $0x10 + JB safe_load + +load: + VMOVUPS (DX), X0 + +prepare: + VPXOR X2, X2, X2 + VPCMPEQB X1, X1, X1 + LEAQ blend_masks<>+16(SB), DX + SUBQ BX, DX + VMOVUPS (DX), X3 + VPBLENDVB X3, X0, X2, X0 + XORQ DX, DX + MOVQ CX, BX + SHRQ $0x02, BX + SHLQ $0x02, BX + +bigloop: + CMPQ DX, BX + JE loop + VPCMPEQB (AX), X0, X8 + VPTEST X1, X8 + JCS done + VPCMPEQB 16(AX), X0, X9 + VPTEST X1, X9 + JCS found1 + VPCMPEQB 32(AX), X0, X10 + VPTEST X1, X10 + JCS found2 + VPCMPEQB 48(AX), X0, X11 + VPTEST X1, X11 + JCS found3 + ADDQ $0x04, DX + ADDQ $0x40, AX + JMP bigloop + +loop: + CMPQ DX, CX + JE done + VPCMPEQB (AX), X0, X2 + VPTEST X1, X2 + JCS done + INCQ DX + ADDQ $0x10, AX + JMP loop + JMP done + +found3: + INCQ DX + +found2: + INCQ DX + +found1: + INCQ DX + +done: + MOVQ DX, ret+48(FP) + RET + +not_found: + MOVQ CX, ret+48(FP) + RET + +safe_load: + MOVQ DX, SI + ANDQ $0x00000fff, SI + CMPQ SI, $0x00000ff0 + JBE load + MOVQ $0xfffffffffffffff0, SI + ADDQ BX, SI + VMOVUPS (DX)(SI*1), X0 + LEAQ shuffle_masks<>+16(SB), DX + SUBQ BX, DX + VMOVUPS (DX), X1 + VPSHUFB X1, X0, X0 + JMP prepare + +DATA blend_masks<>+0(SB)/8, $0xffffffffffffffff +DATA blend_masks<>+8(SB)/8, $0xffffffffffffffff +DATA blend_masks<>+16(SB)/8, $0x0000000000000000 +DATA blend_masks<>+24(SB)/8, $0x0000000000000000 +GLOBL blend_masks<>(SB), RODATA|NOPTR, $32 + +DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+8(SB)/8, $0x0f0e0d0c0b0a0908 +DATA shuffle_masks<>+16(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+24(SB)/8, $0x0f0e0d0c0b0a0908 +GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go new file mode 100644 index 00000000000..feafabef6f5 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go @@ -0,0 +1,8 @@ +//go:build !purego +// +build !purego + +package keyset + +// Lookup searches for a key in a set of keys, returning its index if +// found. If the key cannot be found, the number of keys is returned. 
+func Lookup(keyset []byte, key []byte) int diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s new file mode 100644 index 00000000000..20acb992728 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s @@ -0,0 +1,143 @@ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func Lookup(keyset []byte, key []byte) int +TEXT Ā·Lookup(SB), NOSPLIT, $0-56 + MOVD keyset+0(FP), R0 + MOVD keyset_len+8(FP), R1 + MOVD key+24(FP), R2 + MOVD key_len+32(FP), R3 + MOVD key_cap+40(FP), R4 + + // None of the keys in the set are greater than 16 bytes, so if the input + // key is we can jump straight to not found. + CMP $16, R3 + BHI notfound + + // We'll be moving the keyset pointer (R0) forward as we compare keys, so + // make a copy of the starting point (R6). Also add the byte length (R1) to + // obtain a pointer to the end of the keyset (R5). + MOVD R0, R6 + ADD R0, R1, R5 + + // Prepare a 64-bit mask of all ones. + MOVD $-1, R7 + + // Prepare a vector of all zeroes. + VMOV ZR, V1.B16 + + // Check that it's safe to load 16 bytes of input. If cap(input)<16, jump + // to a check that determines whether a tail load is necessary (to avoid a + // page fault). + CMP $16, R4 + BLO safeload + +load: + // Load the input key (V0) and pad with zero bytes (V1). To blend the two + // vectors, we load a mask for the particular key length and then use TBL + // to select bytes from either V0 or V1. + VLD1 (R2), [V0.B16] + MOVD $blend_masks<>(SB), R10 + ADD R3<<4, R10, R10 + VLD1 (R10), [V2.B16] + VTBL V2.B16, [V0.B16, V1.B16], V3.B16 + +loop: + // Loop through each 16 byte key in the keyset. + CMP R0, R5 + BEQ notfound + + // Load and compare the next key. + VLD1.P 16(R0), [V4.B16] + VCMEQ V3.B16, V4.B16, V5.B16 + VMOV V5.D[0], R8 + VMOV V5.D[1], R9 + AND R8, R9, R9 + + // If the masks match, we found the key. + CMP R9, R7 + BEQ found + JMP loop + +found: + // If the key was found, take the position in the keyset and convert it + // to an index. The keyset pointer (R0) will be 1 key past the match, so + // subtract the starting pointer (R6), divide by 16 to convert from byte + // length to an index, and then subtract one. + SUB R6, R0, R0 + ADD R0>>4, ZR, R0 + SUB $1, R0, R0 + MOVD R0, ret+48(FP) + RET + +notfound: + // Return the number of keys in the keyset, which is the byte length (R1) + // divided by 16. + ADD R1>>4, ZR, R1 + MOVD R1, ret+48(FP) + RET + +safeload: + // Check if the input crosses a page boundary. If not, jump back. + AND $4095, R2, R12 + CMP $4080, R12 + BLS load + + // If it does cross a page boundary, we must assume that loading 16 bytes + // will cause a fault. Instead, we load the 16 bytes up to and including the + // key and then shuffle the key forward in the register. We can shuffle and + // pad with zeroes at the same time to avoid having to also blend (as load + // does). 
+ MOVD $16, R12 + SUB R3, R12, R12 + SUB R12, R2, R2 + VLD1 (R2), [V0.B16] + MOVD $shuffle_masks<>(SB), R10 + ADD R12, R10, R10 + VLD1 (R10), [V2.B16] + VTBL V2.B16, [V0.B16, V1.B16], V3.B16 + JMP loop + +DATA blend_masks<>+0(SB)/8, $0x1010101010101010 +DATA blend_masks<>+8(SB)/8, $0x1010101010101010 +DATA blend_masks<>+16(SB)/8, $0x1010101010101000 +DATA blend_masks<>+24(SB)/8, $0x1010101010101010 +DATA blend_masks<>+32(SB)/8, $0x1010101010100100 +DATA blend_masks<>+40(SB)/8, $0x1010101010101010 +DATA blend_masks<>+48(SB)/8, $0x1010101010020100 +DATA blend_masks<>+56(SB)/8, $0x1010101010101010 +DATA blend_masks<>+64(SB)/8, $0x1010101003020100 +DATA blend_masks<>+72(SB)/8, $0x1010101010101010 +DATA blend_masks<>+80(SB)/8, $0x1010100403020100 +DATA blend_masks<>+88(SB)/8, $0x1010101010101010 +DATA blend_masks<>+96(SB)/8, $0x1010050403020100 +DATA blend_masks<>+104(SB)/8, $0x1010101010101010 +DATA blend_masks<>+112(SB)/8, $0x1006050403020100 +DATA blend_masks<>+120(SB)/8, $0x1010101010101010 +DATA blend_masks<>+128(SB)/8, $0x0706050403020100 +DATA blend_masks<>+136(SB)/8, $0x1010101010101010 +DATA blend_masks<>+144(SB)/8, $0x0706050403020100 +DATA blend_masks<>+152(SB)/8, $0x1010101010101008 +DATA blend_masks<>+160(SB)/8, $0x0706050403020100 +DATA blend_masks<>+168(SB)/8, $0x1010101010100908 +DATA blend_masks<>+176(SB)/8, $0x0706050403020100 +DATA blend_masks<>+184(SB)/8, $0x10101010100A0908 +DATA blend_masks<>+192(SB)/8, $0x0706050403020100 +DATA blend_masks<>+200(SB)/8, $0x101010100B0A0908 +DATA blend_masks<>+208(SB)/8, $0x0706050403020100 +DATA blend_masks<>+216(SB)/8, $0x1010100C0B0A0908 +DATA blend_masks<>+224(SB)/8, $0x0706050403020100 +DATA blend_masks<>+232(SB)/8, $0x10100D0C0B0A0908 +DATA blend_masks<>+240(SB)/8, $0x0706050403020100 +DATA blend_masks<>+248(SB)/8, $0x100E0D0C0B0A0908 +DATA blend_masks<>+256(SB)/8, $0x0706050403020100 +DATA blend_masks<>+264(SB)/8, $0x0F0E0D0C0B0A0908 +GLOBL blend_masks<>(SB), RODATA|NOPTR, $272 + +DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+8(SB)/8, $0x0F0E0D0C0B0A0908 +DATA shuffle_masks<>+16(SB)/8, $0x1010101010101010 +DATA shuffle_masks<>+24(SB)/8, $0x1010101010101010 +GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_default.go b/vendor/github.com/segmentio/asm/keyset/keyset_default.go new file mode 100644 index 00000000000..1fa7d3fc84b --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_default.go @@ -0,0 +1,19 @@ +//go:build purego || !(amd64 || arm64) +// +build purego !amd64,!arm64 + +package keyset + +func Lookup(keyset []byte, key []byte) int { + if len(key) > 16 { + return len(keyset) / 16 + } + var padded [16]byte + copy(padded[:], key) + + for i := 0; i < len(keyset); i += 16 { + if string(padded[:]) == string(keyset[i:i+16]) { + return i / 16 + } + } + return len(keyset) / 16 +} diff --git a/vendor/github.com/segmentio/encoding/LICENSE b/vendor/github.com/segmentio/encoding/LICENSE new file mode 100644 index 00000000000..1fbffdf72ad --- /dev/null +++ b/vendor/github.com/segmentio/encoding/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Segment.io, Inc. 
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/segmentio/encoding/ascii/equal_fold.go b/vendor/github.com/segmentio/encoding/ascii/equal_fold.go
new file mode 100644
index 00000000000..4207f171033
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/ascii/equal_fold.go
@@ -0,0 +1,40 @@
+//go:generate go run equal_fold_asm.go -out equal_fold_amd64.s -stubs equal_fold_amd64.go
+package ascii
+
+import (
+	"github.com/segmentio/asm/ascii"
+)
+
+// EqualFold is a version of bytes.EqualFold designed to work on ASCII input
+// instead of UTF-8.
+//
+// When the program has guarantees that the input is composed of ASCII
+// characters only, it allows for greater optimizations.
+func EqualFold(a, b []byte) bool {
+	return ascii.EqualFold(a, b)
+}
+
+func HasPrefixFold(s, prefix []byte) bool {
+	return ascii.HasPrefixFold(s, prefix)
+}
+
+func HasSuffixFold(s, suffix []byte) bool {
+	return ascii.HasSuffixFold(s, suffix)
+}
+
+// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
+// input instead of UTF-8.
+//
+// When the program has guarantees that the input is composed of ASCII
+// characters only, it allows for greater optimizations.
+func EqualFoldString(a, b string) bool {
+	return ascii.EqualFoldString(a, b)
+}
+
+func HasPrefixFoldString(s, prefix string) bool {
+	return ascii.HasPrefixFoldString(s, prefix)
+}
+
+func HasSuffixFoldString(s, suffix string) bool {
+	return ascii.HasSuffixFoldString(s, suffix)
+}
diff --git a/vendor/github.com/segmentio/encoding/ascii/valid.go b/vendor/github.com/segmentio/encoding/ascii/valid.go
new file mode 100644
index 00000000000..68b7c6ca2bb
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/ascii/valid.go
@@ -0,0 +1,26 @@
+//go:generate go run valid_asm.go -out valid_amd64.s -stubs valid_amd64.go
+package ascii
+
+import (
+	"github.com/segmentio/asm/ascii"
+)
+
+// Valid returns true if b contains only ASCII characters.
+func Valid(b []byte) bool {
+	return ascii.Valid(b)
+}
+
+// ValidByte returns true if b is an ASCII character.
+func ValidByte(b byte) bool {
+	return ascii.ValidByte(b)
+}
+
+// ValidRune returns true if r is an ASCII character.
+func ValidRune(r rune) bool {
+	return ascii.ValidRune(r)
+}
+
+// ValidString returns true if s contains only ASCII characters.
+func ValidString(s string) bool {
+	return ascii.ValidString(s)
+}
diff --git a/vendor/github.com/segmentio/encoding/ascii/valid_print.go b/vendor/github.com/segmentio/encoding/ascii/valid_print.go
new file mode 100644
index 00000000000..241f58499a8
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/ascii/valid_print.go
@@ -0,0 +1,26 @@
+//go:generate go run valid_print_asm.go -out valid_print_amd64.s -stubs valid_print_amd64.go
+package ascii
+
+import (
+	"github.com/segmentio/asm/ascii"
+)
+
+// ValidPrint returns true if b contains only printable ASCII characters.
+func ValidPrint(b []byte) bool {
+	return ascii.ValidPrint(b)
+}
+
+// ValidPrintByte returns true if b is a printable ASCII character.
+func ValidPrintByte(b byte) bool {
+	return ascii.ValidPrintByte(b)
+}
+
+// ValidPrintRune returns true if r is a printable ASCII character.
+func ValidPrintRune(r rune) bool {
+	return ascii.ValidPrintRune(r)
+}
+
+// ValidPrintString returns true if s contains only printable ASCII characters.
+func ValidPrintString(s string) bool {
+	return ascii.ValidPrintString(s)
+}
diff --git a/vendor/github.com/segmentio/encoding/iso8601/parse.go b/vendor/github.com/segmentio/encoding/iso8601/parse.go
new file mode 100644
index 00000000000..6fbe5dc31db
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/iso8601/parse.go
@@ -0,0 +1,185 @@
+package iso8601
+
+import (
+	"encoding/binary"
+	"errors"
+	"time"
+	"unsafe"
+)
+
+var (
+	errInvalidTimestamp = errors.New("invalid ISO8601 timestamp")
+	errMonthOutOfRange  = errors.New("month out of range")
+	errDayOutOfRange    = errors.New("day out of range")
+	errHourOutOfRange   = errors.New("hour out of range")
+	errMinuteOutOfRange = errors.New("minute out of range")
+	errSecondOutOfRange = errors.New("second out of range")
+)
+
+// Parse parses an ISO8601 timestamp, e.g. "2021-03-25T21:36:12Z".
+func Parse(input string) (time.Time, error) {
+	b := unsafeStringToBytes(input)
+	if len(b) >= 20 && len(b) <= 30 && b[len(b)-1] == 'Z' {
+		if len(b) == 21 || (len(b) > 21 && b[19] != '.') {
+			return time.Time{}, errInvalidTimestamp
+		}
+
+		t1 := binary.LittleEndian.Uint64(b)
+		t2 := binary.LittleEndian.Uint64(b[8:16])
+		t3 := uint64(b[16]) | uint64(b[17])<<8 | uint64(b[18])<<16 | uint64('Z')<<24
+
+		// Check for valid separators by masking input with " - - T : : Z".
+		// If separators are all valid, replace them with a '0' (0x30) byte and
+		// check all bytes are now numeric.
+		if !match(t1, mask1) || !match(t2, mask2) || !match(t3, mask3) {
+			return time.Time{}, errInvalidTimestamp
+		}
+		t1 ^= replace1
+		t2 ^= replace2
+		t3 ^= replace3
+		if (nonNumeric(t1) | nonNumeric(t2) | nonNumeric(t3)) != 0 {
+			return time.Time{}, errInvalidTimestamp
+		}
+
+		t1 -= zero
+		t2 -= zero
+		t3 -= zero
+		year := (t1&0xF)*1000 + (t1>>8&0xF)*100 + (t1>>16&0xF)*10 + (t1 >> 24 & 0xF)
+		month := (t1>>40&0xF)*10 + (t1 >> 48 & 0xF)
+		day := (t2&0xF)*10 + (t2 >> 8 & 0xF)
+		hour := (t2>>24&0xF)*10 + (t2 >> 32 & 0xF)
+		minute := (t2>>48&0xF)*10 + (t2 >> 56)
+		second := (t3>>8&0xF)*10 + (t3 >> 16)
+
+		nanos := int64(0)
+		if len(b) > 20 {
+			for _, c := range b[20 : len(b)-1] {
+				if c < '0' || c > '9' {
+					return time.Time{}, errInvalidTimestamp
+				}
+				nanos = (nanos * 10) + int64(c-'0')
+			}
+			nanos *= pow10[30-len(b)]
+		}
+
+		if err := validate(year, month, day, hour, minute, second); err != nil {
+			return time.Time{}, err
+		}
+
+		unixSeconds := int64(daysSinceEpoch(year, month, day))*86400 + int64(hour*3600+minute*60+second)
+		return time.Unix(unixSeconds, nanos).UTC(), nil
+	}
+
+	// Fallback to using time.Parse().
+ t, err := time.Parse(time.RFC3339Nano, input) + if err != nil { + // Override (and don't wrap) the error here. The error returned by + // time.Parse() is dynamic, and includes a reference to the input + // string. By overriding the error, we guarantee that the input string + // doesn't escape. + return time.Time{}, errInvalidTimestamp + } + return t, nil +} + +var pow10 = []int64{1, 10, 100, 1000, 1e4, 1e5, 1e6, 1e7, 1e8} + +const ( + mask1 = 0x2d00002d00000000 // YYYY-MM- + mask2 = 0x00003a0000540000 // DDTHH:MM + mask3 = 0x000000005a00003a // :SSZ____ + + // Generate masks that replace the separators with a numeric byte. + // The input must have valid separators. XOR with the separator bytes + // to zero them out and then XOR with 0x30 to replace them with '0'. + replace1 = mask1 ^ 0x3000003000000000 + replace2 = mask2 ^ 0x0000300000300000 + replace3 = mask3 ^ 0x3030303030000030 + + lsb = ^uint64(0) / 255 + msb = lsb * 0x80 + + zero = lsb * '0' + nine = lsb * '9' +) + +func validate(year, month, day, hour, minute, second uint64) error { + if day == 0 || day > 31 { + return errDayOutOfRange + } + if month == 0 || month > 12 { + return errMonthOutOfRange + } + if hour >= 24 { + return errHourOutOfRange + } + if minute >= 60 { + return errMinuteOutOfRange + } + if second >= 60 { + return errSecondOutOfRange + } + if month == 2 && (day > 29 || (day == 29 && !isLeapYear(year))) { + return errDayOutOfRange + } + if day == 31 { + switch month { + case 4, 6, 9, 11: + return errDayOutOfRange + } + } + return nil +} + +func match(u, mask uint64) bool { + return (u & mask) == mask +} + +func nonNumeric(u uint64) uint64 { + // Derived from https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord. + // Subtract '0' (0x30) from each byte so that the MSB is set in each byte + // if there's a byte less than '0' (0x30). Add 0x46 (0x7F-'9') so that the + // MSB is set if there's a byte greater than '9' (0x39). To handle overflow + // when adding 0x46, include the MSB from the input bytes in the final mask. + // Remove all but the MSBs and then you're left with a mask where each + // non-numeric byte from the input has its MSB set in the output. + return ((u - zero) | (u + (^msb - nine)) | u) & msb +} + +func daysSinceEpoch(year, month, day uint64) uint64 { + // Derived from https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html. + monthAdjusted := month - 3 + var carry uint64 + if monthAdjusted > month { + carry = 1 + } + var adjust uint64 + if carry == 1 { + adjust = 12 + } + yearAdjusted := year + 4800 - carry + monthDays := ((monthAdjusted+adjust)*62719 + 769) / 2048 + leapDays := yearAdjusted/4 - yearAdjusted/100 + yearAdjusted/400 + return yearAdjusted*365 + leapDays + monthDays + (day - 1) - 2472632 +} + +func isLeapYear(y uint64) bool { + return (y%4) == 0 && ((y%100) != 0 || (y%400) == 0) +} + +func unsafeStringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{ + Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)), + Len: len(s), + Cap: len(s), + })) +} + +// sliceHeader is like reflect.SliceHeader but the Data field is a +// unsafe.Pointer instead of being a uintptr to avoid invalid +// conversions from uintptr to unsafe.Pointer. 
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
diff --git a/vendor/github.com/segmentio/encoding/iso8601/valid.go b/vendor/github.com/segmentio/encoding/iso8601/valid.go
new file mode 100644
index 00000000000..2c4edec77bb
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/iso8601/valid.go
@@ -0,0 +1,179 @@
+package iso8601
+
+// ValidFlags is a bitset type used to configure the behavior of the Valid
+// function.
+type ValidFlags int
+
+const (
+	// Strict is a validation flag used to represent a strict iso8601
+	// validation (this is the default).
+	Strict ValidFlags = 0
+
+	// AllowSpaceSeparator allows the presence of a space instead of a 'T' as
+	// separator between the date and time.
+	AllowSpaceSeparator ValidFlags = 1 << iota
+
+	// AllowMissingTime allows the value to contain only a date.
+	AllowMissingTime
+
+	// AllowMissingSubsecond allows the value to contain only a date and time.
+	AllowMissingSubsecond
+
+	// AllowMissingTimezone allows the value to be missing the timezone
+	// information.
+	AllowMissingTimezone
+
+	// AllowNumericTimezone allows the value to represent timezones in their
+	// numeric form.
+	AllowNumericTimezone
+
+	// Flexible is a combination of all validation flags that allow for
+	// non-strict checking of the input value.
+	Flexible = AllowSpaceSeparator | AllowMissingTime | AllowMissingSubsecond | AllowMissingTimezone | AllowNumericTimezone
+)
+
+// Valid checks value to verify whether or not it is a valid iso8601 time
+// representation.
+func Valid(value string, flags ValidFlags) bool {
+	var ok bool
+
+	// year
+	if value, ok = readDigits(value, 4, 4); !ok {
+		return false
+	}
+
+	if value, ok = readByte(value, '-'); !ok {
+		return false
+	}
+
+	// month
+	if value, ok = readDigits(value, 2, 2); !ok {
+		return false
+	}
+
+	if value, ok = readByte(value, '-'); !ok {
+		return false
+	}
+
+	// day
+	if value, ok = readDigits(value, 2, 2); !ok {
+		return false
+	}
+
+	if len(value) == 0 && (flags&AllowMissingTime) != 0 {
+		return true // date only
+	}
+
+	// separator
+	if value, ok = readByte(value, 'T'); !ok {
+		if (flags & AllowSpaceSeparator) == 0 {
+			return false
+		}
+		if value, ok = readByte(value, ' '); !ok {
+			return false
+		}
+	}
+
+	// hour
+	if value, ok = readDigits(value, 2, 2); !ok {
+		return false
+	}
+
+	if value, ok = readByte(value, ':'); !ok {
+		return false
+	}
+
+	// minute
+	if value, ok = readDigits(value, 2, 2); !ok {
+		return false
+	}
+
+	if value, ok = readByte(value, ':'); !ok {
+		return false
+	}
+
+	// second
+	if value, ok = readDigits(value, 2, 2); !ok {
+		return false
+	}
+
+	// subsecond
+	if value, ok = readByte(value, '.'); !ok {
+		if (flags & AllowMissingSubsecond) == 0 {
+			return false
+		}
+	} else {
+		if value, ok = readDigits(value, 1, 9); !ok {
+			return false
+		}
+	}
+
+	if len(value) == 0 && (flags&AllowMissingTimezone) != 0 {
+		return true // date and time
+	}
+
+	// timezone
+	if value, ok = readByte(value, 'Z'); ok {
+		return len(value) == 0
+	}
+
+	if (flags & AllowSpaceSeparator) != 0 {
+		value, _ = readByte(value, ' ')
+	}
+
+	if value, ok = readByte(value, '+'); !ok {
+		if value, ok = readByte(value, '-'); !ok {
+			return false
+		}
+	}
+
+	// timezone hour
+	if value, ok = readDigits(value, 2, 2); !ok {
+		return false
+	}
+
+	if value, ok = readByte(value, ':'); !ok {
+		if (flags & AllowNumericTimezone) == 0 {
+			return false
+		}
+	}
+
+	// timezone minute
+	if value, ok = readDigits(value, 2, 2); !ok {
+		return false
+	}
+
+	return len(value) == 0
+}
+
+func readDigits(value string, min, max int) (string, bool) {
+	if len(value) < min {
+		return value, false
+	}
+
+	i := 0
+
+	for i < max && i < len(value) && isDigit(value[i]) {
+		i++
+	}
+
+	if i < max && i < min {
+		return value, false
+	}
+
+	return value[i:], true
+}
+
+func readByte(value string, c byte) (string, bool) {
+	if len(value) == 0 {
+		return value, false
+	}
+	if value[0] != c {
+		return value, false
+	}
+	return value[1:], true
+}
+
+func isDigit(c byte) bool {
+	return '0' <= c && c <= '9'
+}
diff --git a/vendor/github.com/segmentio/encoding/json/README.md b/vendor/github.com/segmentio/encoding/json/README.md
new file mode 100644
index 00000000000..c5ed94b73dc
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/json/README.md
@@ -0,0 +1,76 @@
+# encoding/json [![GoDoc](https://godoc.org/github.com/segmentio/encoding/json?status.svg)](https://godoc.org/github.com/segmentio/encoding/json)
+
+Go package offering a replacement implementation of the standard library's
+[`encoding/json`](https://golang.org/pkg/encoding/json/) package, with much
+better performance.
+
+## Usage
+
+The exported API of this package mirrors the standard library's
+[`encoding/json`](https://golang.org/pkg/encoding/json/) package; the only
+change needed to take advantage of the performance improvements is the import
+path of the `json` package, from:
+```go
+import (
+    "encoding/json"
+)
+```
+to
+```go
+import (
+    "github.com/segmentio/encoding/json"
+)
+```
+
+One way to gain higher encoding throughput is to disable HTML escaping.
+It allows the string encoding to use a much more efficient code path which
+does not require parsing UTF-8 runes most of the time.
+
+## Performance Improvements
+
+The internal implementation uses a fair amount of unsafe operations (untyped
+code, pointer arithmetic, etc.) to avoid using reflection as much as possible,
+which is often the reason why serialization code has a large CPU and memory
+footprint.
+
+The package aims for zero unnecessary dynamic memory allocations and hot code
+paths that are mostly free from calls into the reflect package.
+
+## Compatibility with encoding/json
+
+This package aims to be a drop-in replacement; it is therefore tested to behave
+exactly like the standard library's package. However, there are still a few
+missing features that have not been ported yet:
+
+- Streaming decoder: currently the `Decoder` implementation offered by the
+package does not support progressively reading values from a JSON array (unlike
+the standard library). In our experience this is a very rare use case; if you
+need it you're better off sticking to the standard library, or spending a bit of
+time implementing it here ;)
+
+Note that this missing feature should not result in performance degradation if
+it were implemented in the package, and we welcome contributions!
+
+## Trade-offs
+
+As one would expect, we had to make a couple of trade-offs to achieve greater
+performance than the standard library, but there were also features that we
+did not want to give away.
+
+Other open-source packages offering a reduced CPU and memory footprint usually
+do so by designing a different API, or require code generation (therefore adding
+complexity to the build process). These were not acceptable conditions for us,
+as we were not willing to trade off developer productivity for better runtime
+performance.
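+
+As a concrete illustration of the drop-in property, here is a minimal,
+hypothetical sketch (not part of the package's test suite) combining the
+familiar `Encoder` API with the HTML-escaping knob mentioned in the Usage
+section above; only the import path differs from stdlib code:
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/segmentio/encoding/json" // drop-in for "encoding/json"
+)
+
+func main() {
+	enc := json.NewEncoder(os.Stdout)
+	// Disabling HTML escaping selects the faster string-encoding path
+	// that rarely needs to parse UTF-8 runes.
+	enc.SetEscapeHTML(false)
+	_ = enc.Encode(map[string]string{"html": "<p>hello & goodbye</p>"})
+}
+```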
+
+To achieve both goals, we chose to exactly replicate the standard
+library interfaces and behavior, which meant the package implementation was the
+only area that we were able to work with. The internals of this package make
+heavy use of unsafe pointer arithmetic and other performance optimizations,
+and therefore are not as approachable as typical Go programs. Basically, we put
+a bigger burden on maintainers to achieve a lower runtime cost without
+sacrificing developer productivity.
+
+For these reasons, we also don't believe that this code should be ported upstream
+to the standard `encoding/json` package. The standard library has to remain
+readable and approachable to maximize stability and maintainability, and to make
+projects like this one possible, because a high-quality reference implementation
+already exists.
diff --git a/vendor/github.com/segmentio/encoding/json/codec.go b/vendor/github.com/segmentio/encoding/json/codec.go
new file mode 100644
index 00000000000..908c3f6dc3a
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/json/codec.go
@@ -0,0 +1,1232 @@
+package json
+
+import (
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+	"unicode"
+	"unsafe"
+
+	"github.com/segmentio/asm/keyset"
+)
+
+const (
+	// 1000 is the value used by the standard encoding/json package.
+	//
+	// https://cs.opensource.google/go/go/+/refs/tags/go1.17.3:src/encoding/json/encode.go;drc=refs%2Ftags%2Fgo1.17.3;l=300
+	startDetectingCyclesAfter = 1000
+)
+
+type codec struct {
+	encode encodeFunc
+	decode decodeFunc
+}
+
+type encoder struct {
+	flags AppendFlags
+	// ptrDepth tracks the depth of pointer traversals; when it reaches the
+	// value of startDetectingCyclesAfter, the ptrSeen map is allocated and the
+	// encoder starts tracking pointers it has seen, in an attempt to detect
+	// whether it has entered a pointer cycle and needs to error before the
+	// goroutine runs out of stack space.
+	ptrDepth uint32
+	ptrSeen  map[unsafe.Pointer]struct{}
+}
+
+type decoder struct {
+	flags ParseFlags
+}
+
+type encodeFunc func(encoder, []byte, unsafe.Pointer) ([]byte, error)
+type decodeFunc func(decoder, []byte, unsafe.Pointer) ([]byte, error)
+
+type emptyFunc func(unsafe.Pointer) bool
+type sortFunc func([]reflect.Value)
+
+var (
+	// Eventually consistent cache mapping go types to dynamically generated
+	// codecs.
+	//
+	// Note: using a uintptr as key instead of reflect.Type shaved ~15ns off of
+	// the ~30ns Marshal/Unmarshal functions which were dominated by the map
+	// lookup time for simple types like bool, int, etc.
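+	//
+	// Writers may race when publishing a new map; a lost update only costs
+	// rebuilding the same codec on a later lookup, so no locking is needed.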
+	cache unsafe.Pointer // map[unsafe.Pointer]codec
+)
+
+func cacheLoad() map[unsafe.Pointer]codec {
+	p := atomic.LoadPointer(&cache)
+	return *(*map[unsafe.Pointer]codec)(unsafe.Pointer(&p))
+}
+
+func cacheStore(typ reflect.Type, cod codec, oldCodecs map[unsafe.Pointer]codec) {
+	newCodecs := make(map[unsafe.Pointer]codec, len(oldCodecs)+1)
+	newCodecs[typeid(typ)] = cod
+
+	for t, c := range oldCodecs {
+		newCodecs[t] = c
+	}
+
+	atomic.StorePointer(&cache, *(*unsafe.Pointer)(unsafe.Pointer(&newCodecs)))
+}
+
+func typeid(t reflect.Type) unsafe.Pointer {
+	return (*iface)(unsafe.Pointer(&t)).ptr
+}
+
+func constructCachedCodec(t reflect.Type, cache map[unsafe.Pointer]codec) codec {
+	c := constructCodec(t, map[reflect.Type]*structType{}, t.Kind() == reflect.Ptr)
+
+	if inlined(t) {
+		c.encode = constructInlineValueEncodeFunc(c.encode)
+	}
+
+	cacheStore(t, c, cache)
+	return c
+}
+
+func constructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) (c codec) {
+	switch t {
+	case nullType, nil:
+		c = codec{encode: encoder.encodeNull, decode: decoder.decodeNull}
+
+	case numberType:
+		c = codec{encode: encoder.encodeNumber, decode: decoder.decodeNumber}
+
+	case bytesType:
+		c = codec{encode: encoder.encodeBytes, decode: decoder.decodeBytes}
+
+	case durationType:
+		c = codec{encode: encoder.encodeDuration, decode: decoder.decodeDuration}
+
+	case timeType:
+		c = codec{encode: encoder.encodeTime, decode: decoder.decodeTime}
+
+	case interfaceType:
+		c = codec{encode: encoder.encodeInterface, decode: decoder.decodeInterface}
+
+	case rawMessageType:
+		c = codec{encode: encoder.encodeRawMessage, decode: decoder.decodeRawMessage}
+
+	case numberPtrType:
+		c = constructPointerCodec(numberPtrType, nil)
+
+	case durationPtrType:
+		c = constructPointerCodec(durationPtrType, nil)
+
+	case timePtrType:
+		c = constructPointerCodec(timePtrType, nil)
+
+	case rawMessagePtrType:
+		c = constructPointerCodec(rawMessagePtrType, nil)
+	}
+
+	if c.encode != nil {
+		return
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		c = codec{encode: encoder.encodeBool, decode: decoder.decodeBool}
+
+	case reflect.Int:
+		c = codec{encode: encoder.encodeInt, decode: decoder.decodeInt}
+
+	case reflect.Int8:
+		c = codec{encode: encoder.encodeInt8, decode: decoder.decodeInt8}
+
+	case reflect.Int16:
+		c = codec{encode: encoder.encodeInt16, decode: decoder.decodeInt16}
+
+	case reflect.Int32:
+		c = codec{encode: encoder.encodeInt32, decode: decoder.decodeInt32}
+
+	case reflect.Int64:
+		c = codec{encode: encoder.encodeInt64, decode: decoder.decodeInt64}
+
+	case reflect.Uint:
+		c = codec{encode: encoder.encodeUint, decode: decoder.decodeUint}
+
+	case reflect.Uintptr:
+		c = codec{encode: encoder.encodeUintptr, decode: decoder.decodeUintptr}
+
+	case reflect.Uint8:
+		c = codec{encode: encoder.encodeUint8, decode: decoder.decodeUint8}
+
+	case reflect.Uint16:
+		c = codec{encode: encoder.encodeUint16, decode: decoder.decodeUint16}
+
+	case reflect.Uint32:
+		c = codec{encode: encoder.encodeUint32, decode: decoder.decodeUint32}
+
+	case reflect.Uint64:
+		c = codec{encode: encoder.encodeUint64, decode: decoder.decodeUint64}
+
+	case reflect.Float32:
+		c = codec{encode: encoder.encodeFloat32, decode: decoder.decodeFloat32}
+
+	case reflect.Float64:
+		c = codec{encode: encoder.encodeFloat64, decode: decoder.decodeFloat64}
+
+	case reflect.String:
+		c = codec{encode: encoder.encodeString, decode: decoder.decodeString}
+
+	case reflect.Interface:
+		c = constructInterfaceCodec(t)
+
+	case reflect.Array:
+		c = constructArrayCodec(t, seen, canAddr)
+
+	case reflect.Slice:
+		c = constructSliceCodec(t, seen)
+
+	case reflect.Map:
+		c = constructMapCodec(t, seen)
+
+	case reflect.Struct:
+		c = constructStructCodec(t, seen, canAddr)
+
+	case reflect.Ptr:
+		c = constructPointerCodec(t, seen)
+
+	default:
+		c = constructUnsupportedTypeCodec(t)
+	}
+
+	p := reflect.PtrTo(t)
+
+	if canAddr {
+		switch {
+		case p.Implements(jsonMarshalerType):
+			c.encode = constructJSONMarshalerEncodeFunc(t, true)
+		case p.Implements(textMarshalerType):
+			c.encode = constructTextMarshalerEncodeFunc(t, true)
+		}
+	}
+
+	switch {
+	case t.Implements(jsonMarshalerType):
+		c.encode = constructJSONMarshalerEncodeFunc(t, false)
+	case t.Implements(textMarshalerType):
+		c.encode = constructTextMarshalerEncodeFunc(t, false)
+	}
+
+	switch {
+	case p.Implements(jsonUnmarshalerType):
+		c.decode = constructJSONUnmarshalerDecodeFunc(t, true)
+	case p.Implements(textUnmarshalerType):
+		c.decode = constructTextUnmarshalerDecodeFunc(t, true)
+	}
+
+	return
+}
+
+func constructStringCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec {
+	c := constructCodec(t, seen, canAddr)
+	return codec{
+		encode: constructStringEncodeFunc(c.encode),
+		decode: constructStringDecodeFunc(c.decode),
+	}
+}
+
+func constructStringEncodeFunc(encode encodeFunc) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeToString(b, p, encode)
+	}
+}
+
+func constructStringDecodeFunc(decode decodeFunc) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeFromString(b, p, decode)
+	}
+}
+
+func constructStringToIntDecodeFunc(t reflect.Type, decode decodeFunc) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeFromStringToInt(b, p, t, decode)
+	}
+}
+
+func constructArrayCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec {
+	e := t.Elem()
+	c := constructCodec(e, seen, canAddr)
+	s := alignedSize(e)
+	return codec{
+		encode: constructArrayEncodeFunc(s, t, c.encode),
+		decode: constructArrayDecodeFunc(s, t, c.decode),
+	}
+}
+
+func constructArrayEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc {
+	n := t.Len()
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeArray(b, p, n, size, t, encode)
+	}
+}
+
+func constructArrayDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc {
+	n := t.Len()
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeArray(b, p, n, size, t, decode)
+	}
+}
+
+func constructSliceCodec(t reflect.Type, seen map[reflect.Type]*structType) codec {
+	e := t.Elem()
+	s := alignedSize(e)
+
+	if e.Kind() == reflect.Uint8 {
+		// Go 1.7+ behavior: slices of byte types (and aliases) may override the
+		// default encoding and decoding behaviors by implementing marshaler and
+		// unmarshaler interfaces.
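+		// Both the element type and its pointer type are inspected here,
+		// mirroring the method-set rules applied by the standard library.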
+		p := reflect.PtrTo(e)
+		c := codec{}
+
+		switch {
+		case e.Implements(jsonMarshalerType):
+			c.encode = constructJSONMarshalerEncodeFunc(e, false)
+		case e.Implements(textMarshalerType):
+			c.encode = constructTextMarshalerEncodeFunc(e, false)
+		case p.Implements(jsonMarshalerType):
+			c.encode = constructJSONMarshalerEncodeFunc(e, true)
+		case p.Implements(textMarshalerType):
+			c.encode = constructTextMarshalerEncodeFunc(e, true)
+		}
+
+		switch {
+		case e.Implements(jsonUnmarshalerType):
+			c.decode = constructJSONUnmarshalerDecodeFunc(e, false)
+		case e.Implements(textUnmarshalerType):
+			c.decode = constructTextUnmarshalerDecodeFunc(e, false)
+		case p.Implements(jsonUnmarshalerType):
+			c.decode = constructJSONUnmarshalerDecodeFunc(e, true)
+		case p.Implements(textUnmarshalerType):
+			c.decode = constructTextUnmarshalerDecodeFunc(e, true)
+		}
+
+		if c.encode != nil {
+			c.encode = constructSliceEncodeFunc(s, t, c.encode)
+		} else {
+			c.encode = encoder.encodeBytes
+		}
+
+		if c.decode != nil {
+			c.decode = constructSliceDecodeFunc(s, t, c.decode)
+		} else {
+			c.decode = decoder.decodeBytes
+		}
+
+		return c
+	}
+
+	c := constructCodec(e, seen, true)
+	return codec{
+		encode: constructSliceEncodeFunc(s, t, c.encode),
+		decode: constructSliceDecodeFunc(s, t, c.decode),
+	}
+}
+
+func constructSliceEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeSlice(b, p, size, t, encode)
+	}
+}
+
+func constructSliceDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeSlice(b, p, size, t, decode)
+	}
+}
+
+func constructMapCodec(t reflect.Type, seen map[reflect.Type]*structType) codec {
+	var sortKeys sortFunc
+	k := t.Key()
+	v := t.Elem()
+
+	// Faster implementations for some common cases.
+	switch {
+	case k == stringType && v == interfaceType:
+		return codec{
+			encode: encoder.encodeMapStringInterface,
+			decode: decoder.decodeMapStringInterface,
+		}
+
+	case k == stringType && v == rawMessageType:
+		return codec{
+			encode: encoder.encodeMapStringRawMessage,
+			decode: decoder.decodeMapStringRawMessage,
+		}
+
+	case k == stringType && v == stringType:
+		return codec{
+			encode: encoder.encodeMapStringString,
+			decode: decoder.decodeMapStringString,
+		}
+
+	case k == stringType && v == stringsType:
+		return codec{
+			encode: encoder.encodeMapStringStringSlice,
+			decode: decoder.decodeMapStringStringSlice,
+		}
+
+	case k == stringType && v == boolType:
+		return codec{
+			encode: encoder.encodeMapStringBool,
+			decode: decoder.decodeMapStringBool,
+		}
+	}
+
+	kc := codec{}
+	vc := constructCodec(v, seen, false)
+
+	if k.Implements(textMarshalerType) || reflect.PtrTo(k).Implements(textUnmarshalerType) {
+		kc.encode = constructTextMarshalerEncodeFunc(k, false)
+		kc.decode = constructTextUnmarshalerDecodeFunc(k, true)
+
+		sortKeys = func(keys []reflect.Value) {
+			sort.Slice(keys, func(i, j int) bool {
+				// This is a performance abomination but the use case is rare
+				// enough that it shouldn't be a problem in practice.
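+				// Each comparison re-marshals both keys, so sorting performs
+				// O(n log n) MarshalText calls for n map keys.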
+				k1, _ := keys[i].Interface().(encoding.TextMarshaler).MarshalText()
+				k2, _ := keys[j].Interface().(encoding.TextMarshaler).MarshalText()
+				return string(k1) < string(k2)
+			})
+		}
+	} else {
+		switch k.Kind() {
+		case reflect.String:
+			kc.encode = encoder.encodeString
+			kc.decode = decoder.decodeString
+
+			sortKeys = func(keys []reflect.Value) {
+				sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() })
+			}
+
+		case reflect.Int,
+			reflect.Int8,
+			reflect.Int16,
+			reflect.Int32,
+			reflect.Int64:
+			kc = constructStringCodec(k, seen, false)
+
+			sortKeys = func(keys []reflect.Value) {
+				sort.Slice(keys, func(i, j int) bool { return intStringsAreSorted(keys[i].Int(), keys[j].Int()) })
+			}
+
+		case reflect.Uint,
+			reflect.Uintptr,
+			reflect.Uint8,
+			reflect.Uint16,
+			reflect.Uint32,
+			reflect.Uint64:
+			kc = constructStringCodec(k, seen, false)
+
+			sortKeys = func(keys []reflect.Value) {
+				sort.Slice(keys, func(i, j int) bool { return uintStringsAreSorted(keys[i].Uint(), keys[j].Uint()) })
+			}
+
+		default:
+			return constructUnsupportedTypeCodec(t)
+		}
+	}
+
+	if inlined(v) {
+		vc.encode = constructInlineValueEncodeFunc(vc.encode)
+	}
+
+	return codec{
+		encode: constructMapEncodeFunc(t, kc.encode, vc.encode, sortKeys),
+		decode: constructMapDecodeFunc(t, kc.decode, vc.decode),
+	}
+}
+
+func constructMapEncodeFunc(t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeMap(b, p, t, encodeKey, encodeValue, sortKeys)
+	}
+}
+
+func constructMapDecodeFunc(t reflect.Type, decodeKey, decodeValue decodeFunc) decodeFunc {
+	kt := t.Key()
+	vt := t.Elem()
+	kz := reflect.Zero(kt)
+	vz := reflect.Zero(vt)
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeMap(b, p, t, kt, vt, kz, vz, decodeKey, decodeValue)
+	}
+}
+
+func constructStructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec {
+	st := constructStructType(t, seen, canAddr)
+	return codec{
+		encode: constructStructEncodeFunc(st),
+		decode: constructStructDecodeFunc(st),
+	}
+}
+
+func constructStructType(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) *structType {
+	// Used for preventing infinite recursion on types that have pointers to
+	// themselves.
+	st := seen[t]
+
+	if st == nil {
+		st = &structType{
+			fields:      make([]structField, 0, t.NumField()),
+			fieldsIndex: make(map[string]*structField),
+			ficaseIndex: make(map[string]*structField),
+			typ:         t,
+		}
+
+		seen[t] = st
+		st.fields = appendStructFields(st.fields, t, 0, seen, canAddr)
+
+		for i := range st.fields {
+			f := &st.fields[i]
+			s := strings.ToLower(f.name)
+			st.fieldsIndex[f.name] = f
+			// When there is ambiguity because multiple fields have the same
+			// case-insensitive representation, the first field must win.
+			if _, exists := st.ficaseIndex[s]; !exists {
+				st.ficaseIndex[s] = f
+			}
+		}
+
+		// At a certain point the linear scan provided by keyset is less
+		// efficient than a map. The 32 was chosen based on benchmarks in the
+		// segmentio/asm repo run with an Intel Kaby Lake processor and go1.17.
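+		// Below that threshold, field-name lookups during decoding can use
+		// the optimized linear scan from segmentio/asm/keyset rather than the
+		// fieldsIndex map.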
+		if len(st.fields) <= 32 {
+			keys := make([][]byte, len(st.fields))
+			for i, f := range st.fields {
+				keys[i] = []byte(f.name)
+			}
+			st.keyset = keyset.New(keys)
+		}
+	}
+
+	return st
+}
+
+func constructStructEncodeFunc(st *structType) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeStruct(b, p, st)
+	}
+}
+
+func constructStructDecodeFunc(st *structType) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeStruct(b, p, st)
+	}
+}
+
+func constructEmbeddedStructPointerCodec(t reflect.Type, unexported bool, offset uintptr, field codec) codec {
+	return codec{
+		encode: constructEmbeddedStructPointerEncodeFunc(t, unexported, offset, field.encode),
+		decode: constructEmbeddedStructPointerDecodeFunc(t, unexported, offset, field.decode),
+	}
+}
+
+func constructEmbeddedStructPointerEncodeFunc(t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeEmbeddedStructPointer(b, p, t, unexported, offset, encode)
+	}
+}
+
+func constructEmbeddedStructPointerDecodeFunc(t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeEmbeddedStructPointer(b, p, t, unexported, offset, decode)
+	}
+}
+
+func appendStructFields(fields []structField, t reflect.Type, offset uintptr, seen map[reflect.Type]*structType, canAddr bool) []structField {
+	type embeddedField struct {
+		index      int
+		offset     uintptr
+		pointer    bool
+		unexported bool
+		subtype    *structType
+		subfield   *structField
+	}
+
+	names := make(map[string]struct{})
+	embedded := make([]embeddedField, 0, 10)
+
+	for i, n := 0, t.NumField(); i < n; i++ {
+		f := t.Field(i)
+
+		var (
+			name       = f.Name
+			anonymous  = f.Anonymous
+			tag        = false
+			omitempty  = false
+			stringify  = false
+			unexported = len(f.PkgPath) != 0
+		)
+
+		if unexported && !anonymous { // unexported
+			continue
+		}
+
+		if parts := strings.Split(f.Tag.Get("json"), ","); len(parts) != 0 {
+			if len(parts[0]) != 0 {
+				name, tag = parts[0], true
+			}
+
+			if name == "-" && len(parts) == 1 { // ignored
+				continue
+			}
+
+			if !isValidTag(name) {
+				name = f.Name
+			}
+
+			for _, tag := range parts[1:] {
+				switch tag {
+				case "omitempty":
+					omitempty = true
+				case "string":
+					stringify = true
+				}
+			}
+		}
+
+		if anonymous && !tag { // embedded
+			typ := f.Type
+			ptr := f.Type.Kind() == reflect.Ptr
+
+			if ptr {
+				typ = f.Type.Elem()
+			}
+
+			if typ.Kind() == reflect.Struct {
+				// When the embedded field is inlined, its fields can be looked
+				// up by offset from the address of the wrapping object, so we
+				// simply add the embedded struct fields to the list of fields
+				// of the current struct type.
+				subtype := constructStructType(typ, seen, canAddr)
+
+				for j := range subtype.fields {
+					embedded = append(embedded, embeddedField{
+						index:      i<<32 | j,
+						offset:     offset + f.Offset,
+						pointer:    ptr,
+						unexported: unexported,
+						subtype:    subtype,
+						subfield:   &subtype.fields[j],
+					})
+				}
+
+				continue
+			}
+
+			if unexported { // ignore unexported non-struct types
+				continue
+			}
+		}
+
+		codec := constructCodec(f.Type, seen, canAddr)
+
+		if stringify {
+			// https://golang.org/pkg/encoding/json/#Marshal
+			//
+			// The "string" option signals that a field is stored as JSON inside
+			// a JSON-encoded string. It applies only to fields of string,
+			// floating point, integer, or boolean types.
+			// This extra level of encoding is sometimes used when
+			// communicating with JavaScript programs.
+			typ := f.Type
+
+			if typ.Kind() == reflect.Ptr {
+				typ = typ.Elem()
+			}
+
+			switch typ.Kind() {
+			case reflect.Int,
+				reflect.Int8,
+				reflect.Int16,
+				reflect.Int32,
+				reflect.Int64,
+				reflect.Uint,
+				reflect.Uintptr,
+				reflect.Uint8,
+				reflect.Uint16,
+				reflect.Uint32,
+				reflect.Uint64:
+				codec.encode = constructStringEncodeFunc(codec.encode)
+				codec.decode = constructStringToIntDecodeFunc(typ, codec.decode)
+			case reflect.Bool,
+				reflect.Float32,
+				reflect.Float64,
+				reflect.String:
+				codec.encode = constructStringEncodeFunc(codec.encode)
+				codec.decode = constructStringDecodeFunc(codec.decode)
+			}
+		}
+
+		fields = append(fields, structField{
+			codec:     codec,
+			offset:    offset + f.Offset,
+			empty:     emptyFuncOf(f.Type),
+			tag:       tag,
+			omitempty: omitempty,
+			name:      name,
+			index:     i << 32,
+			typ:       f.Type,
+			zero:      reflect.Zero(f.Type),
+		})
+
+		names[name] = struct{}{}
+	}
+
+	// Only unambiguous embedded fields must be serialized.
+	ambiguousNames := make(map[string]int)
+	ambiguousTags := make(map[string]int)
+
+	// Embedded types can never override a field that was already present at
+	// the top-level.
+	for name := range names {
+		ambiguousNames[name]++
+		ambiguousTags[name]++
+	}
+
+	for _, embfield := range embedded {
+		ambiguousNames[embfield.subfield.name]++
+		if embfield.subfield.tag {
+			ambiguousTags[embfield.subfield.name]++
+		}
+	}
+
+	for _, embfield := range embedded {
+		subfield := *embfield.subfield
+
+		if ambiguousNames[subfield.name] > 1 && !(subfield.tag && ambiguousTags[subfield.name] == 1) {
+			continue // ambiguous embedded field
+		}
+
+		if embfield.pointer {
+			subfield.codec = constructEmbeddedStructPointerCodec(embfield.subtype.typ, embfield.unexported, subfield.offset, subfield.codec)
+			subfield.offset = embfield.offset
+		} else {
+			subfield.offset += embfield.offset
+		}
+
+		// To prevent dominant flags more than one level below the embedded one.
+		subfield.tag = false
+
+		// To ensure the order of the fields in the output is the same as in the
+		// struct type.
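+		// The index packs the top-level field position in the upper 32 bits
+		// and the embedded field position in the lower ones, so the single
+		// integer sort below restores declaration order.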
+		subfield.index = embfield.index
+
+		fields = append(fields, subfield)
+	}
+
+	for i := range fields {
+		name := fields[i].name
+		fields[i].json = encodeKeyFragment(name, 0)
+		fields[i].html = encodeKeyFragment(name, EscapeHTML)
+	}
+
+	sort.Slice(fields, func(i, j int) bool { return fields[i].index < fields[j].index })
+	return fields
+}
+
+func encodeKeyFragment(s string, flags AppendFlags) string {
+	b := make([]byte, 1, len(s)+4)
+	b[0] = ','
+	e := encoder{flags: flags}
+	b, _ = e.encodeString(b, unsafe.Pointer(&s))
+	b = append(b, ':')
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+func constructPointerCodec(t reflect.Type, seen map[reflect.Type]*structType) codec {
+	e := t.Elem()
+	c := constructCodec(e, seen, true)
+	return codec{
+		encode: constructPointerEncodeFunc(e, c.encode),
+		decode: constructPointerDecodeFunc(e, c.decode),
+	}
+}
+
+func constructPointerEncodeFunc(t reflect.Type, encode encodeFunc) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodePointer(b, p, t, encode)
+	}
+}
+
+func constructPointerDecodeFunc(t reflect.Type, decode decodeFunc) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodePointer(b, p, t, decode)
+	}
+}
+
+func constructInterfaceCodec(t reflect.Type) codec {
+	return codec{
+		encode: constructMaybeEmptyInterfaceEncoderFunc(t),
+		decode: constructMaybeEmptyInterfaceDecoderFunc(t),
+	}
+}
+
+func constructMaybeEmptyInterfaceEncoderFunc(t reflect.Type) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeMaybeEmptyInterface(b, p, t)
+	}
+}
+
+func constructMaybeEmptyInterfaceDecoderFunc(t reflect.Type) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeMaybeEmptyInterface(b, p, t)
+	}
+}
+
+func constructUnsupportedTypeCodec(t reflect.Type) codec {
+	return codec{
+		encode: constructUnsupportedTypeEncodeFunc(t),
+		decode: constructUnsupportedTypeDecodeFunc(t),
+	}
+}
+
+func constructUnsupportedTypeEncodeFunc(t reflect.Type) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeUnsupportedTypeError(b, p, t)
+	}
+}
+
+func constructUnsupportedTypeDecodeFunc(t reflect.Type) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeUnmarshalTypeError(b, p, t)
+	}
+}
+
+func constructJSONMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeJSONMarshaler(b, p, t, pointer)
+	}
+}
+
+func constructJSONUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeJSONUnmarshaler(b, p, t, pointer)
+	}
+}
+
+func constructTextMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return e.encodeTextMarshaler(b, p, t, pointer)
+	}
+}
+
+func constructTextUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc {
+	return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return d.decodeTextUnmarshaler(b, p, t, pointer)
+	}
+}
+
+func constructInlineValueEncodeFunc(encode encodeFunc) encodeFunc {
+	return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) {
+		return encode(e, b, noescape(unsafe.Pointer(&p)))
+	}
+}
+
+// noescape hides a pointer from escape analysis. noescape is
+// the identity function but escape analysis doesn't think the
+// output depends on the input. noescape is inlined and currently
+// compiles down to zero instructions.
+// USE CAREFULLY!
+// This was copied from the runtime; see issues 23382 and 7921.
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+	x := uintptr(p)
+	return unsafe.Pointer(x ^ 0)
+}
+
+func alignedSize(t reflect.Type) uintptr {
+	a := t.Align()
+	s := t.Size()
+	return align(uintptr(a), uintptr(s))
+}
+
+func align(align, size uintptr) uintptr {
+	if align != 0 && (size%align) != 0 {
+		size = ((size / align) + 1) * align
+	}
+	return size
+}
+
+func inlined(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Ptr:
+		return true
+	case reflect.Map:
+		return true
+	case reflect.Struct:
+		return t.NumField() == 1 && inlined(t.Field(0).Type)
+	default:
+		return false
+	}
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func emptyFuncOf(t reflect.Type) emptyFunc {
+	switch t {
+	case bytesType, rawMessageType:
+		return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 }
+	}
+
+	switch t.Kind() {
+	case reflect.Array:
+		if t.Len() == 0 {
+			return func(unsafe.Pointer) bool { return true }
+		}
+
+	case reflect.Map:
+		return func(p unsafe.Pointer) bool { return reflect.NewAt(t, p).Elem().Len() == 0 }
+
+	case reflect.Slice:
+		return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 }
+
+	case reflect.String:
+		return func(p unsafe.Pointer) bool { return len(*(*string)(p)) == 0 }
+
+	case reflect.Bool:
+		return func(p unsafe.Pointer) bool { return !*(*bool)(p) }
+
+	case reflect.Int, reflect.Uint:
+		return func(p unsafe.Pointer) bool { return *(*uint)(p) == 0 }
+
+	case reflect.Uintptr:
+		return func(p unsafe.Pointer) bool { return *(*uintptr)(p) == 0 }
+
+	case reflect.Int8, reflect.Uint8:
+		return func(p unsafe.Pointer) bool { return *(*uint8)(p) == 0 }
+
+	case reflect.Int16, reflect.Uint16:
+		return func(p unsafe.Pointer) bool { return *(*uint16)(p) == 0 }
+
+	case reflect.Int32, reflect.Uint32:
+		return func(p unsafe.Pointer) bool { return *(*uint32)(p) == 0 }
+
+	case reflect.Int64, reflect.Uint64:
+		return func(p unsafe.Pointer) bool { return *(*uint64)(p) == 0 }
+
+	case reflect.Float32:
+		return func(p unsafe.Pointer) bool { return *(*float32)(p) == 0 }
+
+	case reflect.Float64:
+		return func(p unsafe.Pointer) bool { return *(*float64)(p) == 0 }
+
+	case reflect.Ptr:
+		return func(p unsafe.Pointer) bool { return *(*unsafe.Pointer)(p) == nil }
+
+	case reflect.Interface:
+		return func(p unsafe.Pointer) bool { return (*iface)(p).ptr == nil }
+	}
+
+	return func(unsafe.Pointer) bool { return false }
+}
+
+type iface struct {
+	typ unsafe.Pointer
+	ptr unsafe.Pointer
+}
+
+type slice struct {
+	data unsafe.Pointer
+	len  int
+	cap  int
+}
+
+type structType struct {
+	fields      []structField
+	fieldsIndex map[string]*structField
+	ficaseIndex map[string]*structField
+	keyset      []byte
+	typ         reflect.Type
+	inlined     bool
+}
+
+type structField struct {
+	codec     codec
+	offset    uintptr
+	empty     emptyFunc
+	tag       bool
+	omitempty bool
+	json      string
+	html      string
+	name      string
+	typ       reflect.Type
+	zero      reflect.Value
+	index     int
+}
+
+func unmarshalTypeError(b []byte, t reflect.Type) error {
+	return &UnmarshalTypeError{Value: strconv.Quote(prefix(b)), Type: t}
+}
+
+func unmarshalOverflow(b []byte, t reflect.Type) error {
+	return &UnmarshalTypeError{Value: "number " + prefix(b) + " overflows", Type: t}
+}
+
+func unexpectedEOF(b []byte) error {
+	return syntaxError(b, "unexpected end of JSON input")
+}
+
+var syntaxErrorMsgOffset = ^uintptr(0)
+
+func init() {
+	t := reflect.TypeOf(SyntaxError{})
+	for i, n := 0, t.NumField(); i < n; i++ {
+		if f := t.Field(i); f.Type.Kind() == reflect.String {
+			syntaxErrorMsgOffset = f.Offset
+		}
+	}
+}
+
+func syntaxError(b []byte, msg string, args ...interface{}) error {
+	e := new(SyntaxError)
+	i := syntaxErrorMsgOffset
+	if i != ^uintptr(0) {
+		s := "json: " + fmt.Sprintf(msg, args...) + ": " + prefix(b)
+		p := unsafe.Pointer(e)
+		// Hack to set the unexported `msg` field.
+		*(*string)(unsafe.Pointer(uintptr(p) + i)) = s
+	}
+	return e
+}
+
+func objectKeyError(b []byte, err error) ([]byte, error) {
+	if len(b) == 0 {
+		return nil, unexpectedEOF(b)
+	}
+	switch err.(type) {
+	case *UnmarshalTypeError:
+		err = syntaxError(b, "invalid character '%c' looking for beginning of object key", b[0])
+	}
+	return b, err
+}
+
+func prefix(b []byte) string {
+	if len(b) < 32 {
+		return string(b)
+	}
+	return string(b[:32]) + "..."
+}
+
+func intStringsAreSorted(i0, i1 int64) bool {
+	var b0, b1 [32]byte
+	return string(strconv.AppendInt(b0[:0], i0, 10)) < string(strconv.AppendInt(b1[:0], i1, 10))
+}
+
+func uintStringsAreSorted(u0, u1 uint64) bool {
+	var b0, b1 [32]byte
+	return string(strconv.AppendUint(b0[:0], u0, 10)) < string(strconv.AppendUint(b1[:0], u1, 10))
+}
+
+func stringToBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(&sliceHeader{
+		Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
+		Len:  len(s),
+		Cap:  len(s),
+	}))
+}
+
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+var (
+	nullType = reflect.TypeOf(nil)
+	boolType = reflect.TypeOf(false)
+
+	intType   = reflect.TypeOf(int(0))
+	int8Type  = reflect.TypeOf(int8(0))
+	int16Type = reflect.TypeOf(int16(0))
+	int32Type = reflect.TypeOf(int32(0))
+	int64Type = reflect.TypeOf(int64(0))
+
+	uintType    = reflect.TypeOf(uint(0))
+	uint8Type   = reflect.TypeOf(uint8(0))
+	uint16Type  = reflect.TypeOf(uint16(0))
+	uint32Type  = reflect.TypeOf(uint32(0))
+	uint64Type  = reflect.TypeOf(uint64(0))
+	uintptrType = reflect.TypeOf(uintptr(0))
+
+	float32Type = reflect.TypeOf(float32(0))
+	float64Type = reflect.TypeOf(float64(0))
+
+	numberType     = reflect.TypeOf(json.Number(""))
+	stringType     = reflect.TypeOf("")
+	stringsType    = reflect.TypeOf([]string(nil))
+	bytesType      = reflect.TypeOf(([]byte)(nil))
+	durationType   = reflect.TypeOf(time.Duration(0))
+	timeType       = reflect.TypeOf(time.Time{})
+	rawMessageType = reflect.TypeOf(RawMessage(nil))
+
+	numberPtrType     = reflect.PtrTo(numberType)
+	durationPtrType   = reflect.PtrTo(durationType)
+	timePtrType       = reflect.PtrTo(timeType)
+	rawMessagePtrType = reflect.PtrTo(rawMessageType)
+
+	sliceInterfaceType       = reflect.TypeOf(([]interface{})(nil))
+	sliceStringType          = reflect.TypeOf(([]string)(nil))
+	mapStringInterfaceType   = reflect.TypeOf((map[string]interface{})(nil))
+	mapStringRawMessageType  = reflect.TypeOf((map[string]RawMessage)(nil))
+	mapStringStringType      = reflect.TypeOf((map[string]string)(nil))
+	mapStringStringSliceType = reflect.TypeOf((map[string][]string)(nil))
+	mapStringBoolType        = reflect.TypeOf((map[string]bool)(nil))
+
+	interfaceType       = reflect.TypeOf((*interface{})(nil)).Elem()
+	jsonMarshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	jsonUnmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+	textMarshalerType   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+// =============================================================================
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// appendDuration appends a human-readable representation of d to b.
+//
+// The function copies the implementation of time.Duration.String but prevents
+// Go from making a dynamic memory allocation on the returned value.
+func appendDuration(b []byte, d time.Duration) []byte {
+	// Largest time is 2540400h10m10.000000000s
+	var buf [32]byte
+	w := len(buf)
+
+	u := uint64(d)
+	neg := d < 0
+	if neg {
+		u = -u
+	}
+
+	if u < uint64(time.Second) {
+		// Special case: if duration is smaller than a second,
+		// use smaller units, like 1.2ms
+		var prec int
+		w--
+		buf[w] = 's'
+		w--
+		switch {
+		case u == 0:
+			return append(b, '0', 's')
+		case u < uint64(time.Microsecond):
+			// print nanoseconds
+			prec = 0
+			buf[w] = 'n'
+		case u < uint64(time.Millisecond):
+			// print microseconds
+			prec = 3
+			// U+00B5 'µ' micro sign == 0xC2 0xB5
+			w-- // Need room for two bytes.
+			copy(buf[w:], "µ")
+		default:
+			// print milliseconds
+			prec = 6
+			buf[w] = 'm'
+		}
+		w, u = fmtFrac(buf[:w], u, prec)
+		w = fmtInt(buf[:w], u)
+	} else {
+		w--
+		buf[w] = 's'
+
+		w, u = fmtFrac(buf[:w], u, 9)
+
+		// u is now integer seconds
+		w = fmtInt(buf[:w], u%60)
+		u /= 60
+
+		// u is now integer minutes
+		if u > 0 {
+			w--
+			buf[w] = 'm'
+			w = fmtInt(buf[:w], u%60)
+			u /= 60
+
+			// u is now integer hours
+			// Stop at hours because days can be different lengths.
+			if u > 0 {
+				w--
+				buf[w] = 'h'
+				w = fmtInt(buf[:w], u)
+			}
+		}
+	}
+
+	if neg {
+		w--
+		buf[w] = '-'
+	}
+
+	return append(b, buf[w:]...)
+}
+
+// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
+// tail of buf, omitting trailing zeros. It omits the decimal
+// point too when the fraction is 0. It returns the index where the
+// output bytes begin and the value v/10**prec.
+func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
+	// Omit trailing zeros up to and including decimal point.
+	w := len(buf)
+	print := false
+	for i := 0; i < prec; i++ {
+		digit := v % 10
+		print = print || digit != 0
+		if print {
+			w--
+			buf[w] = byte(digit) + '0'
+		}
+		v /= 10
+	}
+	if print {
+		w--
+		buf[w] = '.'
+	}
+	return w, v
+}
+
+// fmtInt formats v into the tail of buf.
+// It returns the index where the output begins.
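+// The caller must hand in a buffer with enough free space at its tail;
+// fmtInt never grows buf.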
+func fmtInt(buf []byte, v uint64) int {
+	w := len(buf)
+	if v == 0 {
+		w--
+		buf[w] = '0'
+	} else {
+		for v > 0 {
+			w--
+			buf[w] = byte(v%10) + '0'
+			v /= 10
+		}
+	}
+	return w
+}
+
+// =============================================================================
diff --git a/vendor/github.com/segmentio/encoding/json/decode.go b/vendor/github.com/segmentio/encoding/json/decode.go
new file mode 100644
index 00000000000..b1723c200c3
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/json/decode.go
@@ -0,0 +1,1462 @@
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+	"unsafe"
+
+	"github.com/segmentio/asm/base64"
+	"github.com/segmentio/asm/keyset"
+	"github.com/segmentio/encoding/iso8601"
+)
+
+func (d decoder) decodeNull(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+	return d.inputError(b, nullType)
+}
+
+func (d decoder) decodeBool(b []byte, p unsafe.Pointer) ([]byte, error) {
+	switch {
+	case hasTruePrefix(b):
+		*(*bool)(p) = true
+		return b[4:], nil
+
+	case hasFalsePrefix(b):
+		*(*bool)(p) = false
+		return b[5:], nil
+
+	case hasNullPrefix(b):
+		return b[4:], nil
+
+	default:
+		return d.inputError(b, boolType)
+	}
+}
+
+func (d decoder) decodeInt(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseInt(b, intType)
+	if err != nil {
+		return r, err
+	}
+
+	*(*int)(p) = int(v)
+	return r, nil
+}
+
+func (d decoder) decodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseInt(b, int8Type)
+	if err != nil {
+		return r, err
+	}
+
+	if v < math.MinInt8 || v > math.MaxInt8 {
+		return r, unmarshalOverflow(b[:len(b)-len(r)], int8Type)
+	}
+
+	*(*int8)(p) = int8(v)
+	return r, nil
+}
+
+func (d decoder) decodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseInt(b, int16Type)
+	if err != nil {
+		return r, err
+	}
+
+	if v < math.MinInt16 || v > math.MaxInt16 {
+		return r, unmarshalOverflow(b[:len(b)-len(r)], int16Type)
+	}
+
+	*(*int16)(p) = int16(v)
+	return r, nil
+}
+
+func (d decoder) decodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseInt(b, int32Type)
+	if err != nil {
+		return r, err
+	}
+
+	if v < math.MinInt32 || v > math.MaxInt32 {
+		return r, unmarshalOverflow(b[:len(b)-len(r)], int32Type)
+	}
+
+	*(*int32)(p) = int32(v)
+	return r, nil
+}
+
+func (d decoder) decodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseInt(b, int64Type)
+	if err != nil {
+		return r, err
+	}
+
+	*(*int64)(p) = v
+	return r, nil
+}
+
+func (d decoder) decodeUint(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseUint(b, uintType)
+	if err != nil {
+		return r, err
+	}
+
+	*(*uint)(p) = uint(v)
+	return r, nil
+}
+
+func (d decoder) decodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseUint(b, uintptrType)
+	if err != nil {
+		return r, err
+	}
+
+	*(*uintptr)(p) = uintptr(v)
+	return r, nil
+}
+
+func (d decoder) decodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseUint(b, uint8Type)
+	if err != nil {
+		return r, err
+	}
+
+	if v > math.MaxUint8 {
+		return r, unmarshalOverflow(b[:len(b)-len(r)], uint8Type)
+	}
+
+	*(*uint8)(p) = uint8(v)
+	return r, nil
+}
+
+func (d decoder) decodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseUint(b, uint16Type)
+	if err != nil {
+		return r, err
+	}
+
+	if v > math.MaxUint16 {
+		return r, unmarshalOverflow(b[:len(b)-len(r)], uint16Type)
+	}
+
+	*(*uint16)(p) = uint16(v)
+	return r, nil
+}
+
+func (d decoder) decodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseUint(b, uint32Type)
+	if err != nil {
+		return r, err
+	}
+
+	if v > math.MaxUint32 {
+		return r, unmarshalOverflow(b[:len(b)-len(r)], uint32Type)
+	}
+
+	*(*uint32)(p) = uint32(v)
+	return r, nil
+}
+
+func (d decoder) decodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, err := d.parseUint(b, uint64Type)
+	if err != nil {
+		return r, err
+	}
+
+	*(*uint64)(p) = v
+	return r, nil
+}
+
+func (d decoder) decodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, _, err := d.parseNumber(b)
+	if err != nil {
+		return d.inputError(b, float32Type)
+	}
+
+	f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 32)
+	if err != nil {
+		return d.inputError(b, float32Type)
+	}
+
+	*(*float32)(p) = float32(f)
+	return r, nil
+}
+
+func (d decoder) decodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, _, err := d.parseNumber(b)
+	if err != nil {
+		return d.inputError(b, float64Type)
+	}
+
+	f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64)
+	if err != nil {
+		return d.inputError(b, float64Type)
+	}
+
+	*(*float64)(p) = f
+	return r, nil
+}
+
+func (d decoder) decodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	v, r, _, err := d.parseNumber(b)
+	if err != nil {
+		return d.inputError(b, numberType)
+	}
+
+	if (d.flags & DontCopyNumber) != 0 {
+		*(*Number)(p) = *(*Number)(unsafe.Pointer(&v))
+	} else {
+		*(*Number)(p) = Number(v)
+	}
+
+	return r, nil
+}
+
+func (d decoder) decodeString(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	s, r, new, err := d.parseStringUnquote(b, nil)
+	if err != nil {
+		if len(b) == 0 || b[0] != '"' {
+			return d.inputError(b, stringType)
+		}
+		return r, err
+	}
+
+	if new || (d.flags&DontCopyString) != 0 {
+		*(*string)(p) = *(*string)(unsafe.Pointer(&s))
+	} else {
+		*(*string)(p) = string(s)
+	}
+
+	return r, nil
+}
+
+func (d decoder) decodeFromString(b []byte, p unsafe.Pointer, decode decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return decode(d, b, p)
+	}
+
+	v, b, _, err := d.parseStringUnquote(b, nil)
+	if err != nil {
+		return d.inputError(v, stringType)
+	}
+
+	if v, err = decode(d, v, p); err != nil {
+		return b, err
+	}
+
+	if v = skipSpaces(v); len(v) != 0 {
+		return b, syntaxError(v, "unexpected trailing tokens after string value")
+	}
+
+	return b, nil
+}
+
+func (d decoder) decodeFromStringToInt(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return decode(d, b, p)
+	}
+
+	if len(b) > 0 && b[0] != '"' {
+		v, r, k, err := d.parseNumber(b)
+		if err == nil {
+			// The encoding/json package will return a *json.UnmarshalTypeError if
+			// the input was a floating point number representation, even though a
+			// string is expected here.
+			if k == Float {
+				_, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64)
+				if err != nil {
+					return r, unmarshalTypeError(v, t)
+				}
+			}
+		}
+		return r, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into int")
+	}
+
+	if len(b) > 1 && b[0] == '"' && b[1] == '"' {
+		return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal \"\" into int")
+	}
+
+	v, b, _, err := d.parseStringUnquote(b, nil)
+	if err != nil {
+		return d.inputError(v, t)
+	}
+
+	if hasLeadingZeroes(v) {
+		// In this context the encoding/json package accepts leading zeroes
+		// because it is not constrained by the JSON syntax; remove them so the
+		// parsing functions don't return syntax errors.
+		u := make([]byte, 0, len(v))
+		i := 0
+
+		if i < len(v) && (v[i] == '-' || v[i] == '+') {
+			u = append(u, v[i])
+			i++
+		}
+
+		for (i+1) < len(v) && v[i] == '0' && '0' <= v[i+1] && v[i+1] <= '9' {
+			i++
+		}
+
+		v = append(u, v[i:]...)
+	}
+
+	if r, err := decode(d, v, p); err != nil {
+		if _, isSyntaxError := err.(*SyntaxError); isSyntaxError {
+			if hasPrefix(v, "-") {
+				// The standard library interprets sequences of '-' characters
+				// as numbers but still returns type errors in this case...
+				return b, unmarshalTypeError(v, t)
+			}
+			return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v))
+		}
+		// When the input value was a valid number representation we retain the
+		// error returned by the decoder.
+		if _, _, _, err := d.parseNumber(v); err != nil {
+			// When the input value was valid JSON we mirror the behavior of the
+			// encoding/json package and return a generic error.
+			if _, _, _, err := d.parseValue(v); err == nil {
+				return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v))
+			}
+		}
+		return b, err
+	} else if len(r) != 0 {
+		return r, unmarshalTypeError(v, t)
+	}
+
+	return b, nil
+}
+
+func (d decoder) decodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		*(*[]byte)(p) = nil
+		return b[4:], nil
+	}
+
+	if len(b) < 2 {
+		return d.inputError(b, bytesType)
+	}
+
+	if b[0] != '"' {
+		// Go 1.7- behavior: byte slices may be decoded from an array of integers.
+		if len(b) > 0 && b[0] == '[' {
+			return d.decodeSlice(b, p, 1, bytesType, decoder.decodeUint8)
+		}
+		return d.inputError(b, bytesType)
+	}
+
+	// The input string may contain escaped sequences, so we need to parse it
+	// before decoding it to match the encoding/json package behavior.
+	src, r, _, err := d.parseStringUnquote(b, nil)
+	if err != nil {
+		return d.inputError(b, bytesType)
+	}
+
+	dst := make([]byte, base64.StdEncoding.DecodedLen(len(src)))
+
+	n, err := base64.StdEncoding.Decode(dst, src)
+	if err != nil {
+		return r, err
+	}
+
+	*(*[]byte)(p) = dst[:n]
+	return r, nil
+}
+
+func (d decoder) decodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	// In order to inter-operate with the stdlib, we must be able to interpret
+	// durations passed as integer values.
+	// There's some discussion about being flexible on how durations are
+	// formatted, but for the time being, it's been punted to go2 at the
+	// earliest: https://github.com/golang/go/issues/4712
+	if len(b) > 0 && b[0] != '"' {
+		v, r, err := d.parseInt(b, durationType)
+		if err != nil {
+			return d.inputError(b, durationType)
+		}
+
+		if v < math.MinInt64 || v > math.MaxInt64 {
+			return r, unmarshalOverflow(b[:len(b)-len(r)], durationType)
+		}
+
+		*(*time.Duration)(p) = time.Duration(v)
+		return r, nil
+	}
+
+	if len(b) < 2 || b[0] != '"' {
+		return d.inputError(b, durationType)
+	}
+
+	i := bytes.IndexByte(b[1:], '"') + 1
+	if i <= 0 {
+		return d.inputError(b, durationType)
+	}
+
+	s := b[1:i] // trim quotes
+
+	v, err := time.ParseDuration(*(*string)(unsafe.Pointer(&s)))
+	if err != nil {
+		return d.inputError(b, durationType)
+	}
+
+	*(*time.Duration)(p) = v
+	return b[i+1:], nil
+}
+
+func (d decoder) decodeTime(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	if len(b) < 2 || b[0] != '"' {
+		return d.inputError(b, timeType)
+	}
+
+	i := bytes.IndexByte(b[1:], '"') + 1
+	if i <= 0 {
+		return d.inputError(b, timeType)
+	}
+
+	s := b[1:i] // trim quotes
+
+	v, err := iso8601.Parse(*(*string)(unsafe.Pointer(&s)))
+	if err != nil {
+		return d.inputError(b, timeType)
+	}
+
+	*(*time.Time)(p) = v
+	return b[i+1:], nil
+}
+
+func (d decoder) decodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	if len(b) < 2 || b[0] != '[' {
+		return d.inputError(b, t)
+	}
+	b = b[1:]
+
+	var err error
+	for i := 0; i < n; i++ {
+		b = skipSpaces(b)
+
+		if i != 0 {
+			if len(b) == 0 {
+				return b, syntaxError(b, "unexpected EOF after array element")
+			}
+			switch b[0] {
+			case ',':
+				b = skipSpaces(b[1:])
+			case ']':
+				return b[1:], nil
+			default:
+				return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0])
+			}
+		}
+
+		b, err = decode(d, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size)))
+		if err != nil {
+			if e, ok := err.(*UnmarshalTypeError); ok {
+				e.Struct = t.String() + e.Struct
+				e.Field = d.prependField(strconv.Itoa(i), e.Field)
+			}
+			return b, err
+		}
+	}
+
+	// The encoding/json package ignores extra elements found when decoding into
+	// array types (which have a fixed size).
+	for {
+		b = skipSpaces(b)
+
+		if len(b) == 0 {
+			return b, syntaxError(b, "missing closing ']' in array value")
+		}
+
+		switch b[0] {
+		case ',':
+			b = skipSpaces(b[1:])
+		case ']':
+			return b[1:], nil
+		}
+
+		_, b, _, err = d.parseValue(b)
+		if err != nil {
+			return b, err
+		}
+	}
+}
+
+var (
+	// This is a placeholder used to construct non-nil empty slices.
+	empty struct{}
+)
+
+func (d decoder) decodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		*(*slice)(p) = slice{}
+		return b[4:], nil
+	}
+
+	if len(b) < 2 {
+		return d.inputError(b, t)
+	}
+
+	if b[0] != '[' {
+		// Go 1.7- behavior: fall back to decoding as a []byte if the element
+		// type is byte; allow conversions from JSON strings even though the
+		// underlying type implemented unmarshaler interfaces.
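+		// In this branch the input is not an array, so decodeBytes handles
+		// the base64 string form (or null) for byte slices.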
+		if t.Elem().Kind() == reflect.Uint8 {
+			return d.decodeBytes(b, p)
+		}
+		return d.inputError(b, t)
+	}
+
+	input := b
+	b = b[1:]
+
+	s := (*slice)(p)
+	s.len = 0
+
+	var err error
+	for {
+		b = skipSpaces(b)
+
+		if len(b) != 0 && b[0] == ']' {
+			if s.data == nil {
+				s.data = unsafe.Pointer(&empty)
+			}
+			return b[1:], nil
+		}
+
+		if s.len != 0 {
+			if len(b) == 0 {
+				return b, syntaxError(b, "unexpected EOF after array element")
+			}
+			if b[0] != ',' {
+				return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0])
+			}
+			b = skipSpaces(b[1:])
+		}
+
+		if s.len == s.cap {
+			c := s.cap
+
+			if c == 0 {
+				c = 10
+			} else {
+				c *= 2
+			}
+
+			*s = extendSlice(t, s, c)
+		}
+
+		b, err = decode(d, b, unsafe.Pointer(uintptr(s.data)+(uintptr(s.len)*size)))
+		if err != nil {
+			if _, r, _, err := d.parseValue(input); err != nil {
+				return r, err
+			} else {
+				b = r
+			}
+			if e, ok := err.(*UnmarshalTypeError); ok {
+				e.Struct = t.String() + e.Struct
+				e.Field = d.prependField(strconv.Itoa(s.len), e.Field)
+			}
+			return b, err
+		}
+
+		s.len++
+	}
+}
+
+func (d decoder) decodeMap(b []byte, p unsafe.Pointer, t, kt, vt reflect.Type, kz, vz reflect.Value, decodeKey, decodeValue decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		*(*unsafe.Pointer)(p) = nil
+		return b[4:], nil
+	}
+
+	if len(b) < 2 || b[0] != '{' {
+		return d.inputError(b, t)
+	}
+	i := 0
+	m := reflect.NewAt(t, p).Elem()
+
+	k := reflect.New(kt).Elem()
+	v := reflect.New(vt).Elem()
+
+	kptr := (*iface)(unsafe.Pointer(&k)).ptr
+	vptr := (*iface)(unsafe.Pointer(&v)).ptr
+	input := b
+
+	if m.IsNil() {
+		m = reflect.MakeMap(t)
+	}
+
+	var err error
+	b = b[1:]
+	for {
+		k.Set(kz)
+		v.Set(vz)
+		b = skipSpaces(b)
+
+		if len(b) != 0 && b[0] == '}' {
+			*(*unsafe.Pointer)(p) = unsafe.Pointer(m.Pointer())
+			return b[1:], nil
+		}
+
+		if i != 0 {
+			if len(b) == 0 {
+				return b, syntaxError(b, "unexpected end of JSON input after object field value")
+			}
+			if b[0] != ',' {
+				return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0])
+			}
+			b = skipSpaces(b[1:])
+		}
+
+		if hasNullPrefix(b) {
+			return b, syntaxError(b, "cannot decode object key string from 'null' value")
+		}
+
+		if b, err = decodeKey(d, b, kptr); err != nil {
+			return objectKeyError(b, err)
+		}
+		b = skipSpaces(b)
+
+		if len(b) == 0 {
+			return b, syntaxError(b, "unexpected end of JSON input after object field key")
+		}
+		if b[0] != ':' {
+			return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0])
+		}
+		b = skipSpaces(b[1:])
+
+		if b, err = decodeValue(d, b, vptr); err != nil {
+			if _, r, _, err := d.parseValue(input); err != nil {
+				return r, err
+			} else {
+				b = r
+			}
+			if e, ok := err.(*UnmarshalTypeError); ok {
+				e.Struct = "map[" + kt.String() + "]" + vt.String() + "{" + e.Struct + "}"
+				e.Field = d.prependField(fmt.Sprint(k.Interface()), e.Field)
+			}
+			return b, err
+		}
+
+		m.SetMapIndex(k, v)
+		i++
+	}
+}
+
+func (d decoder) decodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		*(*unsafe.Pointer)(p) = nil
+		return b[4:], nil
+	}
+
+	if len(b) < 2 || b[0] != '{' {
+		return d.inputError(b, mapStringInterfaceType)
+	}
+
+	i := 0
+	m := *(*map[string]interface{})(p)
+
+	if m == nil {
+		m = make(map[string]interface{}, 64)
+	}
+
+	var err error
+	var key string
+	var val interface{}
+	var input = b
+
+	b = b[1:]
+	for {
+		key = ""
+		val = nil
+
+		b = skipSpaces(b)
+
+		if len(b) != 0 && b[0] == '}' {
+			*(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m))
b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeInterface(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringInterfaceType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringRawMessageType) + } + + i := 0 + m := *(*map[string]RawMessage)(p) + + if m == nil { + m = make(map[string]RawMessage, 64) + } + + var err error + var key string + var val RawMessage + var input = b + + b = b[1:] + for { + key = "" + val = nil + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeRawMessage(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringRawMessageType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringStringType) + } + + i := 0 + m := *(*map[string]string)(p) + + if m == nil { + m = make(map[string]string, 64) + } + + var err error + var key string + var val string + var input = b + + b = b[1:] + for { + key = "" + val = "" + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON 
input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeString(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringStringSliceType) + } + + i := 0 + m := *(*map[string][]string)(p) + + if m == nil { + m = make(map[string][]string, 64) + } + + var err error + var key string + var buf []string + var input = b + var stringSize = unsafe.Sizeof("") + + b = b[1:] + for { + key = "" + buf = buf[:0] + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeSlice(b, unsafe.Pointer(&buf), stringSize, sliceStringType, decoder.decodeString) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + val := make([]string, len(buf)) + copy(val, buf) + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringBoolType) + } + + i := 0 + m := *(*map[string]bool)(p) + + if m == nil { + m = make(map[string]bool, 64) + } + + var err error + var key string + var val bool + var input = b + + b = b[1:] + for { + key = "" + val = false + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, 
"unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeBool(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, st.typ) + } + + var err error + var k []byte + var i int + + // memory buffer used to convert short field names to lowercase + var buf [64]byte + var key []byte + var input = b + + b = b[1:] + for { + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + i++ + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + k, b, _, err = d.parseStringUnquote(b, nil) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + var f *structField + if len(st.keyset) != 0 { + if n := keyset.Lookup(st.keyset, k); n < len(st.fields) { + f = &st.fields[n] + } + } else { + f = st.fieldsIndex[string(k)] + } + + if f == nil && (d.flags&DontMatchCaseInsensitiveStructFields) == 0 { + key = appendToLower(buf[:0], k) + f = st.ficaseIndex[string(key)] + } + + if f == nil { + if (d.flags & DisallowUnknownFields) != 0 { + return b, fmt.Errorf("json: unknown field %q", k) + } + if _, b, _, err = d.parseValue(b); err != nil { + return b, err + } + continue + } + + if b, err = f.codec.decode(d, b, unsafe.Pointer(uintptr(p)+f.offset)); err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = st.typ.String() + e.Struct + e.Field = d.prependField(string(k), e.Field) + } + return b, err + } + } +} + +func (d decoder) decodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) ([]byte, error) { + v := *(*unsafe.Pointer)(p) + + if v == nil { + if unexported { + return nil, fmt.Errorf("json: cannot set embedded pointer to unexported struct: %s", t) + } + v = unsafe.Pointer(reflect.New(t).Pointer()) + *(*unsafe.Pointer)(p) = v + } + + return 
decode(d, b, unsafe.Pointer(uintptr(v)+offset)) +} + +func (d decoder) decodePointer(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + pp := *(*unsafe.Pointer)(p) + if pp != nil && t.Kind() == reflect.Ptr { + return decode(d, b, pp) + } + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + v := *(*unsafe.Pointer)(p) + if v == nil { + v = unsafe.Pointer(reflect.New(t).Pointer()) + *(*unsafe.Pointer)(p) = v + } + + return decode(d, b, v) +} + +func (d decoder) decodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + val := *(*interface{})(p) + *(*interface{})(p) = nil + + if t := reflect.TypeOf(val); t != nil && t.Kind() == reflect.Ptr { + if v := reflect.ValueOf(val); v.IsNil() || t.Elem().Kind() != reflect.Ptr { + // If the destination is nil the only value that is OK to decode is + // `null`, and the encoding/json package always nils the destination + // interface value in this case. + if hasNullPrefix(b) { + *(*interface{})(p) = nil + return b[4:], nil + } + } + + b, err := Parse(b, val, d.flags) + if err == nil { + *(*interface{})(p) = val + } + return b, err + } + + v, b, k, err := d.parseValue(b) + if err != nil { + return b, err + } + + switch k.Class() { + case Object: + m := make(map[string]interface{}) + v, err = d.decodeMapStringInterface(v, unsafe.Pointer(&m)) + val = m + + case Array: + a := make([]interface{}, 0, 10) + v, err = d.decodeSlice(v, unsafe.Pointer(&a), unsafe.Sizeof(a[0]), sliceInterfaceType, decoder.decodeInterface) + val = a + + case String: + s := "" + v, err = d.decodeString(v, unsafe.Pointer(&s)) + val = s + + case Null: + v, val = nil, nil + + case Bool: + v, val = nil, k == True + + case Num: + if (d.flags & UseNumber) != 0 { + n := Number("") + v, err = d.decodeNumber(v, unsafe.Pointer(&n)) + val = n + } else { + f := 0.0 + v, err = d.decodeFloat64(v, unsafe.Pointer(&f)) + val = f + } + + default: + return b, syntaxError(v, "expected token but found '%c'", v[0]) + } + + if err != nil { + return b, err + } + + if v = skipSpaces(v); len(v) != 0 { + return b, syntaxError(v, "unexpected trailing tokens after json value") + } + + *(*interface{})(p) = val + return b, nil +} + +func (d decoder) decodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + if hasNullPrefix(b) { + *(*interface{})(p) = nil + return b[4:], nil + } + + if x := reflect.NewAt(t, p).Elem(); !x.IsNil() { + if e := x.Elem(); e.Kind() == reflect.Ptr { + return Parse(b, e.Interface(), d.flags) + } + } else if t.NumMethod() == 0 { // empty interface + return Parse(b, (*interface{})(p), d.flags) + } + + return d.decodeUnmarshalTypeError(b, p, t) +} + +func (d decoder) decodeUnmarshalTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + v, b, _, err := d.parseValue(b) + if err != nil { + return b, err + } + return b, &UnmarshalTypeError{ + Value: string(v), + Type: t, + } +} + +func (d decoder) decodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + v, r, _, err := d.parseValue(b) + if err != nil { + return d.inputError(b, rawMessageType) + } + + if (d.flags & DontCopyRawMessage) == 0 { + v = append(make([]byte, 0, len(v)), v...)
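+ // The copy detaches the RawMessage from the caller's input buffer so it
+ // stays valid after the input is reused; DontCopyRawMessage skips this
+ // copy and aliases the input instead.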
+ } + + *(*RawMessage)(p) = json.RawMessage(v) + return r, err +} + +func (d decoder) decodeJSONUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v, b, _, err := d.parseValue(b) + if err != nil { + return b, err + } + + u := reflect.NewAt(t, p) + if !pointer { + u = u.Elem() + t = t.Elem() + } + if u.IsNil() { + u.Set(reflect.New(t)) + } + + return b, u.Interface().(Unmarshaler).UnmarshalJSON(v) +} + +func (d decoder) decodeTextUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + var value string + + v, b, k, err := d.parseValue(b) + if err != nil { + return b, err + } + if len(v) == 0 { + return d.inputError(v, t) + } + + switch k.Class() { + case Null: + return b, err + + case String: + s, _, _, err := d.parseStringUnquote(v, nil) + if err != nil { + return b, err + } + u := reflect.NewAt(t, p) + if !pointer { + u = u.Elem() + t = t.Elem() + } + if u.IsNil() { + u.Set(reflect.New(t)) + } + return b, u.Interface().(encoding.TextUnmarshaler).UnmarshalText(s) + + case Bool: + if k == True { + value = "true" + } else { + value = "false" + } + + case Num: + value = "number" + + case Object: + value = "object" + + case Array: + value = "array" + } + + return b, &UnmarshalTypeError{Value: value, Type: reflect.PtrTo(t)} +} + +func (d decoder) prependField(key, field string) string { + if field != "" { + return key + "." + field + } + return key +} + +func (d decoder) inputError(b []byte, t reflect.Type) ([]byte, error) { + if len(b) == 0 { + return nil, unexpectedEOF(b) + } + _, r, _, err := d.parseValue(b) + if err != nil { + return r, err + } + return skipSpaces(r), unmarshalTypeError(b, t) +} diff --git a/vendor/github.com/segmentio/encoding/json/encode.go b/vendor/github.com/segmentio/encoding/json/encode.go new file mode 100644 index 00000000000..acb3b67b5a1 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/encode.go @@ -0,0 +1,990 @@ +package json + +import ( + "encoding" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "sync" + "time" + "unicode/utf8" + "unsafe" + + "github.com/segmentio/asm/base64" +) + +const hex = "0123456789abcdef" + +func (e encoder) encodeNull(b []byte, p unsafe.Pointer) ([]byte, error) { + return append(b, "null"...), nil +} + +func (e encoder) encodeBool(b []byte, p unsafe.Pointer) ([]byte, error) { + if *(*bool)(p) { + return append(b, "true"...), nil + } + return append(b, "false"...), nil +} + +func (e encoder) encodeInt(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int)(p))), nil +} + +func (e encoder) encodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int8)(p))), nil +} + +func (e encoder) encodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int16)(p))), nil +} + +func (e encoder) encodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int32)(p))), nil +} + +func (e encoder) encodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, *(*int64)(p)), nil +} + +func (e encoder) encodeUint(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint)(p))), nil +} + +func (e encoder) encodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uintptr)(p))), nil +} + +func (e encoder) encodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint8)(p))), nil +} + +func (e encoder) encodeUint16(b []byte, p unsafe.Pointer) ([]byte, 
error) { + return appendUint(b, uint64(*(*uint16)(p))), nil +} + +func (e encoder) encodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint32)(p))), nil +} + +func (e encoder) encodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, *(*uint64)(p)), nil +} + +func (e encoder) encodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeFloat(b, float64(*(*float32)(p)), 32) +} + +func (e encoder) encodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeFloat(b, *(*float64)(p), 64) +} + +func (e encoder) encodeFloat(b []byte, f float64, bits int) ([]byte, error) { + switch { + case math.IsNaN(f): + return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "NaN"} + case math.IsInf(f, 0): + return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "inf"} + } + + // Convert as if by ES6 number to string conversion. + // This matches most other JSON generators. + // See golang.org/issue/6384 and golang.org/issue/14135. + // Like fmt %g, but the exponent cutoffs are different + // and exponents themselves are not padded to two digits. + abs := math.Abs(f) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + + b = strconv.AppendFloat(b, f, fmt, -1, int(bits)) + + if fmt == 'e' { + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' { + b[n-2] = b[n-1] + b = b[:n-1] + } + } + + return b, nil +} + +func (e encoder) encodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) { + n := *(*Number)(p) + if n == "" { + n = "0" + } + + d := decoder{} + _, _, _, err := d.parseNumber(stringToBytes(string(n))) + if err != nil { + return b, err + } + + return append(b, n...), nil +} + +func (e encoder) encodeString(b []byte, p unsafe.Pointer) ([]byte, error) { + s := *(*string)(p) + if len(s) == 0 { + return append(b, `""`...), nil + } + i := 0 + j := 0 + escapeHTML := (e.flags & EscapeHTML) != 0 + + b = append(b, '"') + + if len(s) >= 8 { + if j = escapeIndex(s, escapeHTML); j < 0 { + return append(append(b, s...), '"'), nil + } + } + + for j < len(s) { + c := s[j] + + if c >= 0x20 && c <= 0x7f && c != '\\' && c != '"' && (!escapeHTML || (c != '<' && c != '>' && c != '&')) { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + b = append(b, s[i:j]...) + b = append(b, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + b = append(b, s[i:j]...) + b = append(b, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + b = append(b, s[i:j]...) + b = append(b, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + b = append(b, s[i:j]...) + b = append(b, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + b = append(b, s[i:j]...) + b = append(b, `\u00`...) + b = append(b, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + // This encodes bytes < 0x20 except for \t, \n and \r. + if c < 0x20 { + b = append(b, s[i:j]...) + b = append(b, `\u00`...) + b = append(b, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + r, size := utf8.DecodeRuneInString(s[j:]) + + if r == utf8.RuneError && size == 1 { + b = append(b, s[i:j]...) + b = append(b, `\ufffd`...) 
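+ // Each invalid byte is replaced with the UTF-8 encoding of U+FFFD (the
+ // Unicode replacement character), mirroring encoding/json.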
+ i = j + size + j = j + size + continue + } + + switch r { + case '\u2028', '\u2029': + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + b = append(b, s[i:j]...) + b = append(b, `\u202`...) + b = append(b, hex[r&0xF]) + i = j + size + j = j + size + continue + } + + j += size + } + + b = append(b, s[i:]...) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeToString(b []byte, p unsafe.Pointer, encode encodeFunc) ([]byte, error) { + i := len(b) + + b, err := encode(e, b, p) + if err != nil { + return b, err + } + + j := len(b) + s := b[i:] + + if b, err = e.encodeString(b, unsafe.Pointer(&s)); err != nil { + return b, err + } + + n := copy(b[i:], b[j:]) + return b[:i+n], nil +} + +func (e encoder) encodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) { + v := *(*[]byte)(p) + if v == nil { + return append(b, "null"...), nil + } + + n := base64.StdEncoding.EncodedLen(len(v)) + 2 + + if avail := cap(b) - len(b); avail < n { + newB := make([]byte, cap(b)+(n-avail)) + copy(newB, b) + b = newB[:len(b)] + } + + i := len(b) + j := len(b) + n + + b = b[:j] + b[i] = '"' + base64.StdEncoding.Encode(b[i+1:j-1], v) + b[j-1] = '"' + return b, nil +} + +func (e encoder) encodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) { + b = append(b, '"') + b = appendDuration(b, *(*time.Duration)(p)) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeTime(b []byte, p unsafe.Pointer) ([]byte, error) { + t := *(*time.Time)(p) + b = append(b, '"') + b = t.AppendFormat(b, time.RFC3339Nano) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) { + var start = len(b) + var err error + b = append(b, '[') + + for i := 0; i < n; i++ { + if i != 0 { + b = append(b, ',') + } + if b, err = encode(e, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))); err != nil { + return b[:start], err + } + } + + b = append(b, ']') + return b, nil +} + +func (e encoder) encodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) { + s := (*slice)(p) + + if s.data == nil && s.len == 0 && s.cap == 0 { + return append(b, "null"...), nil + } + + return e.encodeArray(b, s.data, s.len, size, t, encode) +} + +func (e encoder) encodeMap(b []byte, p unsafe.Pointer, t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) ([]byte, error) { + m := reflect.NewAt(t, p).Elem() + if m.IsNil() { + return append(b, "null"...), nil + } + + keys := m.MapKeys() + if sortKeys != nil && (e.flags&SortMapKeys) != 0 { + sortKeys(keys) + } + + var start = len(b) + var err error + b = append(b, '{') + + for i, k := range keys { + v := m.MapIndex(k) + + if i != 0 { + b = append(b, ',') + } + + if b, err = encodeKey(e, b, (*iface)(unsafe.Pointer(&k)).ptr); err != nil { + return b[:start], err + } + + b = append(b, ':') + + if b, err = encodeValue(e, b, (*iface)(unsafe.Pointer(&v)).ptr); err != nil { + return b[:start], err + } + } + + b = append(b, '}') + return b, nil +} + +type element struct { + key string + val interface{} + raw RawMessage +} + +type mapslice struct { + elements []element +} + +func (m *mapslice) Len() 
int { return len(m.elements) } +func (m *mapslice) Less(i, j int) bool { return m.elements[i].key < m.elements[j].key } +func (m *mapslice) Swap(i, j int) { m.elements[i], m.elements[j] = m.elements[j], m.elements[i] } + +var mapslicePool = sync.Pool{ + New: func() interface{} { return new(mapslice) }, +} + +func (e encoder) encodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]interface{})(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var err error + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = Append(b, v, e.flags) + if err != nil { + return b, err + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + s.elements = append(s.elements, element{key: key, val: val}) + } + sort.Sort(s) + + var start = len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = Append(b, elem.val, e.flags) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]RawMessage)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var err error + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString doesn't return errors so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = e.encodeRawMessage(b, unsafe.Pointer(&v)) + if err != nil { + break + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, raw := range m { + s.elements = append(s.elements, element{key: key, raw: raw}) + } + sort.Sort(s) + + var start = len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = e.encodeRawMessage(b, unsafe.Pointer(&elem.raw)) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]string)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. 
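+ // Without SortMapKeys, Go's randomized map iteration means the key order
+ // of the output can differ between runs for the same map value.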
+ b = append(b, '{') + + if len(m) != 0 { + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + b, _ = e.encodeString(b, unsafe.Pointer(&v)) + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + v := val + s.elements = append(s.elements, element{key: key, val: &v}) + } + sort.Sort(s) + + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + b, _ = e.encodeString(b, unsafe.Pointer(elem.val.(*string))) + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string][]string)(p) + if m == nil { + return append(b, "null"...), nil + } + + var stringSize = unsafe.Sizeof("") + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var err error + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = e.encodeSlice(b, unsafe.Pointer(&v), stringSize, sliceStringType, encoder.encodeString) + if err != nil { + return b, err + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + v := val + s.elements = append(s.elements, element{key: key, val: &v}) + } + sort.Sort(s) + + var start = len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = e.encodeSlice(b, unsafe.Pointer(elem.val.(*[]string)), stringSize, sliceStringType, encoder.encodeString) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]bool)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + if v { + b = append(b, ":true"...) + } else { + b = append(b, ":false"...) 
+ } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + s.elements = append(s.elements, element{key: key, val: val}) + } + sort.Sort(s) + + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + if elem.val.(bool) { + b = append(b, ":true"...) + } else { + b = append(b, ":false"...) + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { + var start = len(b) + var err error + var k string + var n int + b = append(b, '{') + + escapeHTML := (e.flags & EscapeHTML) != 0 + + for i := range st.fields { + f := &st.fields[i] + v := unsafe.Pointer(uintptr(p) + f.offset) + + if f.omitempty && f.empty(v) { + continue + } + + if escapeHTML { + k = f.html + } else { + k = f.json + } + + lengthBeforeKey := len(b) + + if n != 0 { + b = append(b, k...) + } else { + b = append(b, k[1:]...) + } + + if b, err = f.codec.encode(e, b, v); err != nil { + if err == (rollback{}) { + b = b[:lengthBeforeKey] + continue + } + return b[:start], err + } + + n++ + } + + b = append(b, '}') + return b, nil +} + +type rollback struct{} + +func (rollback) Error() string { return "rollback" } + +func (e encoder) encodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) ([]byte, error) { + p = *(*unsafe.Pointer)(p) + if p == nil { + return b, rollback{} + } + return encode(e, b, unsafe.Pointer(uintptr(p)+offset)) +} + +func (e encoder) encodePointer(b []byte, p unsafe.Pointer, t reflect.Type, encode encodeFunc) ([]byte, error) { + if p = *(*unsafe.Pointer)(p); p != nil { + if e.ptrDepth++; e.ptrDepth >= startDetectingCyclesAfter { + if _, seen := e.ptrSeen[p]; seen { + // TODO: reconstruct the reflect.Value from p + t so we can set + // the error's Value field?
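+ // For example, a self-referential value like
+ //	type T struct{ Next *T }
+ //	v := T{}; v.Next = &v
+ // would recurse forever without this check, so it is reported as an
+ // UnsupportedValueError instead.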
+ return b, &UnsupportedValueError{Str: fmt.Sprintf("encountered a cycle via %s", t)} + } + if e.ptrSeen == nil { + e.ptrSeen = make(map[unsafe.Pointer]struct{}) + } + e.ptrSeen[p] = struct{}{} + defer delete(e.ptrSeen, p) + } + return encode(e, b, p) + } + return e.encodeNull(b, nil) +} + +func (e encoder) encodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + return Append(b, *(*interface{})(p), e.flags) +} + +func (e encoder) encodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + return Append(b, reflect.NewAt(t, p).Elem().Interface(), e.flags) +} + +func (e encoder) encodeUnsupportedTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + return b, &UnsupportedTypeError{Type: t} +} + +func (e encoder) encodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + v := *(*RawMessage)(p) + + if v == nil { + return append(b, "null"...), nil + } + + var s []byte + + if (e.flags & TrustRawMessage) != 0 { + s = v + } else { + var err error + d := decoder{} + s, _, _, err = d.parseValue(v) + if err != nil { + return b, &UnsupportedValueError{Value: reflect.ValueOf(v), Str: err.Error()} + } + } + + if (e.flags & EscapeHTML) != 0 { + return appendCompactEscapeHTML(b, s), nil + } + + return append(b, s...), nil +} + +func (e encoder) encodeJSONMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v := reflect.NewAt(t, p) + + if !pointer { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return append(b, "null"...), nil + } + } + + j, err := v.Interface().(Marshaler).MarshalJSON() + if err != nil { + return b, err + } + + d := decoder{} + s, _, _, err := d.parseValue(j) + if err != nil { + return b, &MarshalerError{Type: t, Err: err} + } + + if (e.flags & EscapeHTML) != 0 { + return appendCompactEscapeHTML(b, s), nil + } + + return append(b, s...), nil +} + +func (e encoder) encodeTextMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v := reflect.NewAt(t, p) + + if !pointer { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return append(b, `null`...), nil + } + } + + s, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return b, err + } + + return e.encodeString(b, unsafe.Pointer(&s)) +} + +func appendCompactEscapeHTML(dst []byte, src []byte) []byte { + start := 0 + escape := false + inString := false + + for i, c := range src { + if !inString { + switch c { + case '"': // enter string + inString = true + case ' ', '\n', '\r', '\t': // skip space + if start < i { + dst = append(dst, src[start:i]...) + } + start = i + 1 + } + continue + } + + if escape { + escape = false + continue + } + + if c == '\\' { + escape = true + continue + } + + if c == '"' { + inString = false + continue + } + + if c == '<' || c == '>' || c == '&' { + if start < i { + dst = append(dst, src[start:i]...) + } + dst = append(dst, `\u00`...) + dst = append(dst, hex[c>>4], hex[c&0xF]) + start = i + 1 + continue + } + + // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9). + if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 { + if start < i { + dst = append(dst, src[start:i]...) + } + dst = append(dst, `\u202`...) + dst = append(dst, hex[src[i+2]&0xF]) + start = i + 3 + continue + } + } + + if start < len(src) { + dst = append(dst, src[start:]...) 
+ } + + return dst +} diff --git a/vendor/github.com/segmentio/encoding/json/int.go b/vendor/github.com/segmentio/encoding/json/int.go new file mode 100644 index 00000000000..b53149cbd7a --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/int.go @@ -0,0 +1,98 @@ +package json + +import ( + "unsafe" +) + +var endianness int + +func init() { + var b [2]byte + *(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD) + + switch b[0] { + case 0xCD: + endianness = 0 // LE + case 0xAB: + endianness = 1 // BE + default: + panic("could not determine endianness") + } +} + +// "00010203...96979899" cast to []uint16 +var intLELookup = [100]uint16{ + 0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, + 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, + 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, + 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, + 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, + 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935, + 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936, + 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937, + 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938, + 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, +} + +var intBELookup = [100]uint16{ + 0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, + 0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139, + 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239, + 0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339, + 0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439, + 0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539, + 0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639, + 0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, + 0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839, + 0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939, +} + +var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup} + +func appendInt(b []byte, n int64) []byte { + return formatInteger(b, uint64(n), n < 0) +} + +func appendUint(b []byte, n uint64) []byte { + return formatInteger(b, n, false) +} + +func formatInteger(out []byte, n uint64, negative bool) []byte { + if !negative { + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + } else { + n = -n + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + if negative { + i-- + b[i] = '-' + } + + return append(out, b[i:]...) 
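+ // Worked example: formatInteger(nil, 1234, false) stores lookup[34] and
+ // then lookup[12] into the uint16 view of b, so b[18:22] holds "1234",
+ // which is what gets appended; each table lookup emits two digits.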
+} diff --git a/vendor/github.com/segmentio/encoding/json/json.go b/vendor/github.com/segmentio/encoding/json/json.go new file mode 100644 index 00000000000..47f3ba1732a --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/json.go @@ -0,0 +1,582 @@ +package json + +import ( + "bytes" + "encoding/json" + "io" + "math/bits" + "reflect" + "runtime" + "sync" + "unsafe" +) + +// Delim is documented at https://golang.org/pkg/encoding/json/#Delim +type Delim = json.Delim + +// InvalidUTF8Error is documented at https://golang.org/pkg/encoding/json/#InvalidUTF8Error +type InvalidUTF8Error = json.InvalidUTF8Error + +// InvalidUnmarshalError is documented at https://golang.org/pkg/encoding/json/#InvalidUnmarshalError +type InvalidUnmarshalError = json.InvalidUnmarshalError + +// Marshaler is documented at https://golang.org/pkg/encoding/json/#Marshaler +type Marshaler = json.Marshaler + +// MarshalerError is documented at https://golang.org/pkg/encoding/json/#MarshalerError +type MarshalerError = json.MarshalerError + +// Number is documented at https://golang.org/pkg/encoding/json/#Number +type Number = json.Number + +// RawMessage is documented at https://golang.org/pkg/encoding/json/#RawMessage +type RawMessage = json.RawMessage + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError = json.SyntaxError + +// Token is documented at https://golang.org/pkg/encoding/json/#Token +type Token = json.Token + +// UnmarshalFieldError is documented at https://golang.org/pkg/encoding/json/#UnmarshalFieldError +type UnmarshalFieldError = json.UnmarshalFieldError + +// UnmarshalTypeError is documented at https://golang.org/pkg/encoding/json/#UnmarshalTypeError +type UnmarshalTypeError = json.UnmarshalTypeError + +// Unmarshaler is documented at https://golang.org/pkg/encoding/json/#Unmarshaler +type Unmarshaler = json.Unmarshaler + +// UnsupportedTypeError is documented at https://golang.org/pkg/encoding/json/#UnsupportedTypeError +type UnsupportedTypeError = json.UnsupportedTypeError + +// UnsupportedValueError is documented at https://golang.org/pkg/encoding/json/#UnsupportedValueError +type UnsupportedValueError = json.UnsupportedValueError + +// AppendFlags is a type used to represent configuration options that can be +// applied when formatting json output. +type AppendFlags uint32 + +const ( + // EscapeHTML is a formatting flag used to escape HTML in json strings. + EscapeHTML AppendFlags = 1 << iota + + // SortMapKeys is a formatting flag used to enable sorting of map keys when + // encoding JSON (this matches the behavior of the standard encoding/json + // package). + SortMapKeys + + // TrustRawMessage is a performance optimization flag to skip value + // checking of raw messages. It should only be used if the values are + // known to be valid json (e.g., they were created by json.Unmarshal). + TrustRawMessage + + // appendNewline is a formatting flag to enable the addition of a newline + // in Encode (this matches the behavior of the standard encoding/json + // package). + appendNewline +) + +// ParseFlags is a type used to represent configuration options that can be +// applied when parsing json input.
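+// Flags are combined with bitwise-or; for example, passing
+// UseNumber|DontCopyString to Parse enables both behaviors in a single call.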
+type ParseFlags uint32 + +func (flags ParseFlags) has(f ParseFlags) bool { + return (flags & f) != 0 +} + +func (f ParseFlags) kind() Kind { + return Kind((f >> kindOffset) & 0xFF) +} + +func (f ParseFlags) withKind(kind Kind) ParseFlags { + return (f & ^(ParseFlags(0xFF) << kindOffset)) | (ParseFlags(kind) << kindOffset) +} + +const ( + // DisallowUnknownFields is a parsing flag used to prevent decoding of + // objects to Go struct values when a field of the input does not match + // with any of the struct fields. + DisallowUnknownFields ParseFlags = 1 << iota + + // UseNumber is a parsing flag used to load numeric values as Number + // instead of float64. + UseNumber + + // DontCopyString is a parsing flag used to provide zero-copy support when + // loading string values from a json payload. It is not always possible to + // avoid dynamic memory allocations, for example when a string is escaped in + // the json data a new buffer has to be allocated, but when the `wire` value + // can be used as content of a Go value the decoder will simply point into + // the input buffer. + DontCopyString + + // DontCopyNumber is a parsing flag used to provide zero-copy support when + // loading Number values (see DontCopyString and DontCopyRawMessage). + DontCopyNumber + + // DontCopyRawMessage is a parsing flag used to provide zero-copy support + // when loading RawMessage values from a json payload. When used, the + // RawMessage values will not be allocated into new memory buffers and + // will instead point directly to the area of the input buffer where the + // value was found. + DontCopyRawMessage + + // DontMatchCaseInsensitiveStructFields is a parsing flag used to prevent + // matching fields in a case-insensitive way. This can prevent degrading + // performance on case conversions, and can also act as a stricter decoding + // mode. + DontMatchCaseInsensitiveStructFields + + // ZeroCopy is a parsing flag that combines all the copy optimizations + // available in the package. + // + // The zero-copy optimizations are better used in request-handler style + // code where none of the values are retained after the handler returns. + ZeroCopy = DontCopyString | DontCopyNumber | DontCopyRawMessage + + // validAsciiPrint is an internal flag indicating that the input contains + // only valid ASCII print chars (0x20 <= c <= 0x7E). If the flag is unset, + // it's unknown whether the input is valid ASCII print. + validAsciiPrint ParseFlags = 1 << 28 + + // noBackslash is an internal flag indicating that the input does not + // contain a backslash. If the flag is unset, it's unknown whether the + // input contains a backslash. + noBackslash ParseFlags = 1 << 29 + + // Bit offset where the kind of the json value is stored. + // + // See Kind in token.go for the enum. + kindOffset ParseFlags = 16 +) + +// Kind represents the different kinds of value that exist in JSON. +type Kind uint + +const ( + Undefined Kind = 0 + + Null Kind = 1 // Null is not zero, so we keep zero for "undefined". + + Bool Kind = 2 // Bit two is set to 1, means it's a boolean. + False Kind = 2 // Bool + 0 + True Kind = 3 // Bool + 1 + + Num Kind = 4 // Bit three is set to 1, means it's a number. + Uint Kind = 5 // Num + 1 + Int Kind = 6 // Num + 2 + Float Kind = 7 // Num + 3 + + String Kind = 8 // Bit four is set to 1, means it's a string. + Unescaped Kind = 9 // String + 1 + + Array Kind = 16 // Equivalent to Delim == '[' + Object Kind = 32 // Equivalent to Delim == '{' +) + +// Class returns the class of k.
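+// For example, the class of False and True is Bool, the class of Uint, Int,
+// and Float is Num, and the class of Unescaped is String.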
+func (k Kind) Class() Kind { return Kind(1 << uint(bits.Len(uint(k))-1)) } + +// Append acts like Marshal but appends the json representation to b instead of +// always reallocating a new slice. +func Append(b []byte, x interface{}, flags AppendFlags) ([]byte, error) { + if x == nil { + // Special case for nil values because it makes the rest of the code + // simpler to assume that it won't be seeing nil pointers. + return append(b, "null"...), nil + } + + t := reflect.TypeOf(x) + p := (*iface)(unsafe.Pointer(&x)).ptr + + cache := cacheLoad() + c, found := cache[typeid(t)] + + if !found { + c = constructCachedCodec(t, cache) + } + + b, err := c.encode(encoder{flags: flags}, b, p) + runtime.KeepAlive(x) + return b, err +} + +// Escape is a convenience helper to construct an escaped JSON string from s. +// The function escapes HTML characters; for more control over the escape +// behavior and to write to a pre-allocated buffer, use AppendEscape. +func Escape(s string) []byte { + // +10 for extra escape characters, maybe not enough and the buffer will + // be reallocated. + b := make([]byte, 0, len(s)+10) + return AppendEscape(b, s, EscapeHTML) +} + +// AppendEscape appends s to b with the string escaped as a JSON value. +// This will include the starting and ending quote characters, and the +// appropriate characters will be escaped correctly for JSON encoding. +func AppendEscape(b []byte, s string, flags AppendFlags) []byte { + e := encoder{flags: flags} + b, _ = e.encodeString(b, unsafe.Pointer(&s)) + return b +} + +// Unescape is a convenience helper to unescape a JSON value. +// For more control over the unescape behavior and +// to write to a pre-allocated buffer, use AppendUnescape. +func Unescape(s []byte) []byte { + b := make([]byte, 0, len(s)) + return AppendUnescape(b, s, ParseFlags(0)) +} + +// AppendUnescape appends s to b with the string unescaped as a JSON value. +// This will remove starting and ending quote characters, and the +// appropriate characters will be unescaped correctly as if JSON decoded. +// New space will be reallocated if more space is needed. +func AppendUnescape(b []byte, s []byte, flags ParseFlags) []byte { + d := decoder{flags: flags} + buf := new(string) + d.decodeString(s, unsafe.Pointer(buf)) + return append(b, *buf...)
+} + +// Compact is documented at https://golang.org/pkg/encoding/json/#Compact +func Compact(dst *bytes.Buffer, src []byte) error { + return json.Compact(dst, src) +} + +// HTMLEscape is documented at https://golang.org/pkg/encoding/json/#HTMLEscape +func HTMLEscape(dst *bytes.Buffer, src []byte) { + json.HTMLEscape(dst, src) +} + +// Indent is documented at https://golang.org/pkg/encoding/json/#Indent +func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return json.Indent(dst, src, prefix, indent) +} + +// Marshal is documented at https://golang.org/pkg/encoding/json/#Marshal +func Marshal(x interface{}) ([]byte, error) { + var err error + var buf = encoderBufferPool.Get().(*encoderBuffer) + + if buf.data, err = Append(buf.data[:0], x, EscapeHTML|SortMapKeys); err != nil { + return nil, err + } + + b := make([]byte, len(buf.data)) + copy(b, buf.data) + encoderBufferPool.Put(buf) + return b, nil +} + +// MarshalIndent is documented at https://golang.org/pkg/encoding/json/#MarshalIndent +func MarshalIndent(x interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(x) + + if err == nil { + tmp := &bytes.Buffer{} + tmp.Grow(2 * len(b)) + + Indent(tmp, b, prefix, indent) + b = tmp.Bytes() + } + + return b, err +} + +// Unmarshal is documented at https://golang.org/pkg/encoding/json/#Unmarshal +func Unmarshal(b []byte, x interface{}) error { + r, err := Parse(b, x, 0) + if len(r) != 0 { + if _, ok := err.(*SyntaxError); !ok { + // The encoding/json package prioritizes reporting errors caused by + // unexpected trailing bytes over other issues; here we emulate this + // behavior by overriding the error. + err = syntaxError(r, "invalid character '%c' after top-level value", r[0]) + } + } + return err +} + +// Parse behaves like Unmarshal but the caller can pass a set of flags to +// configure the parsing behavior. 
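+//
+// Unlike Unmarshal, Parse also returns the bytes remaining after the first
+// JSON value, so a caller processing a stream of values can, for example,
+// feed the returned remainder back in as the next input.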
+func Parse(b []byte, x interface{}, flags ParseFlags) ([]byte, error) { + t := reflect.TypeOf(x) + p := (*iface)(unsafe.Pointer(&x)).ptr + + d := decoder{flags: flags | internalParseFlags(b)} + + b = skipSpaces(b) + + if t == nil || p == nil || t.Kind() != reflect.Ptr { + _, r, _, err := d.parseValue(b) + r = skipSpaces(r) + if err != nil { + return r, err + } + return r, &InvalidUnmarshalError{Type: t} + } + t = t.Elem() + + cache := cacheLoad() + c, found := cache[typeid(t)] + + if !found { + c = constructCachedCodec(t, cache) + } + + r, err := c.decode(d, b, p) + return skipSpaces(r), err +} + +// Valid is documented at https://golang.org/pkg/encoding/json/#Valid +func Valid(data []byte) bool { + data = skipSpaces(data) + d := decoder{flags: internalParseFlags(data)} + _, data, _, err := d.parseValue(data) + if err != nil { + return false + } + return len(skipSpaces(data)) == 0 +} + +// Decoder is documented at https://golang.org/pkg/encoding/json/#Decoder +type Decoder struct { + reader io.Reader + buffer []byte + remain []byte + inputOffset int64 + err error + flags ParseFlags +} + +// NewDecoder is documented at https://golang.org/pkg/encoding/json/#NewDecoder +func NewDecoder(r io.Reader) *Decoder { return &Decoder{reader: r} } + +// Buffered is documented at https://golang.org/pkg/encoding/json/#Decoder.Buffered +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.remain) +} + +// Decode is documented at https://golang.org/pkg/encoding/json/#Decoder.Decode +func (dec *Decoder) Decode(v interface{}) error { + raw, err := dec.readValue() + if err != nil { + return err + } + _, err = Parse(raw, v, dec.flags) + return err +} + +const ( + minBufferSize = 32768 + minReadSize = 4096 +) + +// readValue reads one JSON value from the buffer and returns its raw bytes. It +// is optimized for the "one JSON value per line" case. +func (dec *Decoder) readValue() (v []byte, err error) { + var n int + var r []byte + d := decoder{flags: dec.flags} + + for { + if len(dec.remain) != 0 { + v, r, _, err = d.parseValue(dec.remain) + if err == nil { + dec.remain, n = skipSpacesN(r) + dec.inputOffset += int64(len(v) + n) + return + } + if len(r) != 0 { + // Parsing of the next JSON value stopped at a position other + // than the end of the input buffer, which indicates that a + // syntax error was encountered.
+ return + } + } + + if err = dec.err; err != nil { + if len(dec.remain) != 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return + } + + if dec.buffer == nil { + dec.buffer = make([]byte, 0, minBufferSize) + } else { + dec.buffer = dec.buffer[:copy(dec.buffer[:cap(dec.buffer)], dec.remain)] + dec.remain = nil + } + + if (cap(dec.buffer) - len(dec.buffer)) < minReadSize { + buf := make([]byte, len(dec.buffer), 2*cap(dec.buffer)) + copy(buf, dec.buffer) + dec.buffer = buf + } + + n, err = io.ReadFull(dec.reader, dec.buffer[len(dec.buffer):cap(dec.buffer)]) + if n > 0 { + dec.buffer = dec.buffer[:len(dec.buffer)+n] + if err != nil { + err = nil + } + } else if err == io.ErrUnexpectedEOF { + err = io.EOF + } + dec.remain, n = skipSpacesN(dec.buffer) + d.flags = dec.flags | internalParseFlags(dec.remain) + dec.inputOffset += int64(n) + dec.err = err + } +} + +// DisallowUnknownFields is documented at https://golang.org/pkg/encoding/json/#Decoder.DisallowUnknownFields +func (dec *Decoder) DisallowUnknownFields() { dec.flags |= DisallowUnknownFields } + +// UseNumber is documented at https://golang.org/pkg/encoding/json/#Decoder.UseNumber +func (dec *Decoder) UseNumber() { dec.flags |= UseNumber } + +// DontCopyString is an extension to the standard encoding/json package +// which instructs the decoder to not copy strings loaded from the json +// payloads when possible. +func (dec *Decoder) DontCopyString() { dec.flags |= DontCopyString } + +// DontCopyNumber is an extension to the standard encoding/json package +// which instructs the decoder to not copy numbers loaded from the json +// payloads. +func (dec *Decoder) DontCopyNumber() { dec.flags |= DontCopyNumber } + +// DontCopyRawMessage is an extension to the standard encoding/json package +// which instructs the decoder to not allocate RawMessage values in separate +// memory buffers (see the documentation of the DontCopyRawMessage flag for +// more details). +func (dec *Decoder) DontCopyRawMessage() { dec.flags |= DontCopyRawMessage } + +// DontMatchCaseInsensitiveStructFields is an extension to the standard +// encoding/json package which instructs the decoder to not match object fields +// against struct fields in a case-insensitive way; the field names have to +// match exactly to be decoded into the struct field values. +func (dec *Decoder) DontMatchCaseInsensitiveStructFields() { + dec.flags |= DontMatchCaseInsensitiveStructFields +} + +// ZeroCopy is an extension to the standard encoding/json package which enables +// all the copy optimizations of the decoder. +func (dec *Decoder) ZeroCopy() { dec.flags |= ZeroCopy } + +// InputOffset returns the input stream byte offset of the current decoder position. +// The offset gives the location of the end of the most recently returned token +// and the beginning of the next token.
+func (dec *Decoder) InputOffset() int64 { + return dec.inputOffset +} + +// Encoder is documented at https://golang.org/pkg/encoding/json/#Encoder +type Encoder struct { + writer io.Writer + prefix string + indent string + buffer *bytes.Buffer + err error + flags AppendFlags +} + +// NewEncoder is documented at https://golang.org/pkg/encoding/json/#NewEncoder +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{writer: w, flags: EscapeHTML | SortMapKeys | appendNewline} +} + +// Encode is documented at https://golang.org/pkg/encoding/json/#Encoder.Encode +func (enc *Encoder) Encode(v interface{}) error { + if enc.err != nil { + return enc.err + } + + var err error + var buf = encoderBufferPool.Get().(*encoderBuffer) + + buf.data, err = Append(buf.data[:0], v, enc.flags) + + if err != nil { + encoderBufferPool.Put(buf) + return err + } + + if (enc.flags & appendNewline) != 0 { + buf.data = append(buf.data, '\n') + } + b := buf.data + + if enc.prefix != "" || enc.indent != "" { + if enc.buffer == nil { + enc.buffer = new(bytes.Buffer) + enc.buffer.Grow(2 * len(buf.data)) + } else { + enc.buffer.Reset() + } + Indent(enc.buffer, buf.data, enc.prefix, enc.indent) + b = enc.buffer.Bytes() + } + + if _, err := enc.writer.Write(b); err != nil { + enc.err = err + } + + encoderBufferPool.Put(buf) + return err +} + +// SetEscapeHTML is documented at https://golang.org/pkg/encoding/json/#Encoder.SetEscapeHTML +func (enc *Encoder) SetEscapeHTML(on bool) { + if on { + enc.flags |= EscapeHTML + } else { + enc.flags &= ^EscapeHTML + } +} + +// SetIndent is documented at https://golang.org/pkg/encoding/json/#Encoder.SetIndent +func (enc *Encoder) SetIndent(prefix, indent string) { + enc.prefix = prefix + enc.indent = indent +} + +// SetSortMapKeys is an extension to the standard encoding/json package which +// allows the program to toggle sorting of map keys on and off. +func (enc *Encoder) SetSortMapKeys(on bool) { + if on { + enc.flags |= SortMapKeys + } else { + enc.flags &= ^SortMapKeys + } +} + +// SetTrustRawMessage skips value checking when encoding a raw json message. It should only +// be used if the values are known to be valid json, e.g. because they were originally created +// by json.Unmarshal. +func (enc *Encoder) SetTrustRawMessage(on bool) { + if on { + enc.flags |= TrustRawMessage + } else { + enc.flags &= ^TrustRawMessage + } +} + +// SetAppendNewline is an extension to the standard encoding/json package which +// allows the program to toggle the addition of a newline in Encode on or off. +func (enc *Encoder) SetAppendNewline(on bool) { + if on { + enc.flags |= appendNewline + } else { + enc.flags &= ^appendNewline + } +} + +var encoderBufferPool = sync.Pool{ + New: func() interface{} { return &encoderBuffer{data: make([]byte, 0, 4096)} }, +} + +type encoderBuffer struct{ data []byte } diff --git a/vendor/github.com/segmentio/encoding/json/parse.go b/vendor/github.com/segmentio/encoding/json/parse.go new file mode 100644 index 00000000000..3e656217ec7 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/parse.go @@ -0,0 +1,787 @@ +package json + +import ( + "bytes" + "encoding/binary" + "math" + "math/bits" + "reflect" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "github.com/segmentio/encoding/ascii" +) + +// All space characters defined in the json specification.
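+// Per RFC 8259, insignificant whitespace is limited to these four byte
+// values: space, horizontal tab, line feed, and carriage return.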
+const ( + sp = ' ' + ht = '\t' + nl = '\n' + cr = '\r' +) + +const ( + escape = '\\' + quote = '"' +) + +func internalParseFlags(b []byte) (flags ParseFlags) { + // Don't consider surrounding whitespace + b = skipSpaces(b) + b = trimTrailingSpaces(b) + if ascii.ValidPrint(b) { + flags |= validAsciiPrint + } + if bytes.IndexByte(b, '\\') == -1 { + flags |= noBackslash + } + return +} + +func skipSpaces(b []byte) []byte { + if len(b) > 0 && b[0] <= 0x20 { + b, _ = skipSpacesN(b) + } + return b +} + +func skipSpacesN(b []byte) ([]byte, int) { + for i := range b { + switch b[i] { + case sp, ht, nl, cr: + default: + return b[i:], i + } + } + return nil, 0 +} + +func trimTrailingSpaces(b []byte) []byte { + if len(b) > 0 && b[len(b)-1] <= 0x20 { + b = trimTrailingSpacesN(b) + } + return b +} + +func trimTrailingSpacesN(b []byte) []byte { + i := len(b) - 1 +loop: + for ; i >= 0; i-- { + switch b[i] { + case sp, ht, nl, cr: + default: + break loop + } + } + return b[:i+1] +} + +// parseInt parses a decimal representation of an int64 from b. +// +// The function is equivalent to calling strconv.ParseInt(string(b), 10, 64) but +// it prevents Go from making a memory allocation for converting a byte slice to +// a string (escape analysis fails due to the error returned by strconv.ParseInt). +// +// Because it only works with base 10 the function is also significantly faster +// than strconv.ParseInt. +func (d decoder) parseInt(b []byte, t reflect.Type) (int64, []byte, error) { + var value int64 + var count int + + if len(b) == 0 { + return 0, b, syntaxError(b, "cannot decode integer from an empty input") + } + + if b[0] == '-' { + const max = math.MinInt64 + const lim = max / 10 + + if len(b) == 1 { + return 0, b, syntaxError(b, "cannot decode integer from '-'") + } + + if len(b) > 2 && b[1] == '0' && '0' <= b[2] && b[2] <= '9' { + return 0, b, syntaxError(b, "invalid leading character '0' in integer") + } + + for _, c := range b[1:] { + if !(c >= '0' && c <= '9') { + if count == 0 { + b, err := d.inputError(b, t) + return 0, b, err + } + break + } + + if value < lim { + return 0, b, unmarshalOverflow(b, t) + } + + value *= 10 + x := int64(c - '0') + + if value < (max + x) { + return 0, b, unmarshalOverflow(b, t) + } + + value -= x + count++ + } + + count++ + } else { + if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' { + return 0, b, syntaxError(b, "invalid leading character '0' in integer") + } + + for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ { + x := int64(b[count] - '0') + next := value*10 + x + if next < value { + return 0, b, unmarshalOverflow(b, t) + } + value = next + } + + if count == 0 { + b, err := d.inputError(b, t) + return 0, b, err + } + } + + if count < len(b) { + switch b[count] { + case '.', 'e', 'E': // was this actually a float? + v, r, _, err := d.parseNumber(b) + if err != nil { + v, r = b[:count+1], b[count+1:] + } + return 0, r, unmarshalTypeError(v, t) + } + } + + return value, b[count:], nil +} + +// parseUint is like parseInt but for unsigned integers. 
+func (d decoder) parseUint(b []byte, t reflect.Type) (uint64, []byte, error) {
+	var value uint64
+	var count int
+
+	if len(b) == 0 {
+		return 0, b, syntaxError(b, "cannot decode integer value from an empty input")
+	}
+
+	if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' {
+		return 0, b, syntaxError(b, "invalid leading character '0' in integer")
+	}
+
+	for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ {
+		x := uint64(b[count] - '0')
+		next := value*10 + x
+		if next < value {
+			return 0, b, unmarshalOverflow(b, t)
+		}
+		value = next
+	}
+
+	if count == 0 {
+		b, err := d.inputError(b, t)
+		return 0, b, err
+	}
+
+	if count < len(b) {
+		switch b[count] {
+		case '.', 'e', 'E': // was this actually a float?
+			v, r, _, err := d.parseNumber(b)
+			if err != nil {
+				v, r = b[:count+1], b[count+1:]
+			}
+			return 0, r, unmarshalTypeError(v, t)
+		}
+	}
+
+	return value, b[count:], nil
+}
+
+// parseUintHex parses a hexadecimal representation of a uint64 from b.
+//
+// The function is equivalent to calling strconv.ParseUint(string(b), 16, 64) but
+// it prevents Go from making a memory allocation for converting a byte slice to
+// a string (escape analysis fails due to the error returned by strconv.ParseUint).
+//
+// Because it only works with base 16 the function is also significantly faster
+// than strconv.ParseUint.
+func (d decoder) parseUintHex(b []byte) (uint64, []byte, error) {
+	const max = math.MaxUint64
+	const lim = max / 0x10
+
+	var value uint64
+	var count int
+
+	if len(b) == 0 {
+		return 0, b, syntaxError(b, "cannot decode hexadecimal value from an empty input")
+	}
+
+parseLoop:
+	for i, c := range b {
+		var x uint64
+
+		switch {
+		case c >= '0' && c <= '9':
+			x = uint64(c - '0')
+
+		case c >= 'A' && c <= 'F':
+			x = uint64(c-'A') + 0xA
+
+		case c >= 'a' && c <= 'f':
+			x = uint64(c-'a') + 0xA
+
+		default:
+			if i == 0 {
+				return 0, b, syntaxError(b, "expected hexadecimal digit but found '%c'", c)
+			}
+			break parseLoop
+		}
+
+		if value > lim {
+			return 0, b, syntaxError(b, "hexadecimal value out of range")
+		}
+
+		if value *= 0x10; value > (max - x) {
+			return 0, b, syntaxError(b, "hexadecimal value out of range")
+		}
+
+		value += x
+		count++
+	}
+
+	return value, b[count:], nil
+}
+
+func (d decoder) parseNull(b []byte) ([]byte, []byte, Kind, error) {
+	if hasNullPrefix(b) {
+		return b[:4], b[4:], Null, nil
+	}
+	if len(b) < 4 {
+		return nil, b[len(b):], Undefined, unexpectedEOF(b)
+	}
+	return nil, b, Undefined, syntaxError(b, "expected 'null' but found invalid token")
+}
+
+func (d decoder) parseTrue(b []byte) ([]byte, []byte, Kind, error) {
+	if hasTruePrefix(b) {
+		return b[:4], b[4:], True, nil
+	}
+	if len(b) < 4 {
+		return nil, b[len(b):], Undefined, unexpectedEOF(b)
+	}
+	return nil, b, Undefined, syntaxError(b, "expected 'true' but found invalid token")
+}
+
+func (d decoder) parseFalse(b []byte) ([]byte, []byte, Kind, error) {
+	if hasFalsePrefix(b) {
+		return b[:5], b[5:], False, nil
+	}
+	if len(b) < 5 {
+		return nil, b[len(b):], Undefined, unexpectedEOF(b)
+	}
+	return nil, b, Undefined, syntaxError(b, "expected 'false' but found invalid token")
+}
+
+func (d decoder) parseNumber(b []byte) (v, r []byte, kind Kind, err error) {
+	if len(b) == 0 {
+		r, err = b, unexpectedEOF(b)
+		return
+	}
+
+	// Assume it's an unsigned integer at first.
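+	// The kind is refined as the scan proceeds: a leading '-' switches it to
+	// Int, and a fraction or exponent part further below switches it to Float.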
+ kind = Uint + + i := 0 + // sign + if b[i] == '-' { + kind = Int + i++ + } + + if i == len(b) { + r, err = b[i:], syntaxError(b, "missing number value after sign") + return + } + + if b[i] < '0' || b[i] > '9' { + r, err = b[i:], syntaxError(b, "expected digit but got '%c'", b[i]) + return + } + + // integer part + if b[i] == '0' { + i++ + if i == len(b) || (b[i] != '.' && b[i] != 'e' && b[i] != 'E') { + v, r = b[:i], b[i:] + return + } + if '0' <= b[i] && b[i] <= '9' { + r, err = b[i:], syntaxError(b, "cannot decode number with leading '0' character") + return + } + } + + for i < len(b) && '0' <= b[i] && b[i] <= '9' { + i++ + } + + // decimal part + if i < len(b) && b[i] == '.' { + kind = Float + i++ + decimalStart := i + + for i < len(b) { + if c := b[i]; !('0' <= c && c <= '9') { + if i == decimalStart { + r, err = b[i:], syntaxError(b, "expected digit but found '%c'", c) + return + } + break + } + i++ + } + + if i == decimalStart { + r, err = b[i:], syntaxError(b, "expected decimal part after '.'") + return + } + } + + // exponent part + if i < len(b) && (b[i] == 'e' || b[i] == 'E') { + kind = Float + i++ + + if i < len(b) { + if c := b[i]; c == '+' || c == '-' { + i++ + } + } + + if i == len(b) { + r, err = b[i:], syntaxError(b, "missing exponent in number") + return + } + + exponentStart := i + + for i < len(b) { + if c := b[i]; !('0' <= c && c <= '9') { + if i == exponentStart { + err = syntaxError(b, "expected digit but found '%c'", c) + return + } + break + } + i++ + } + } + + v, r = b[:i], b[i:] + return +} + +func (d decoder) parseUnicode(b []byte) (rune, int, error) { + if len(b) < 4 { + return 0, len(b), syntaxError(b, "unicode code point must have at least 4 characters") + } + + u, r, err := d.parseUintHex(b[:4]) + if err != nil { + return 0, 4, syntaxError(b, "parsing unicode code point: %s", err) + } + + if len(r) != 0 { + return 0, 4, syntaxError(b, "invalid unicode code point") + } + + return rune(u), 4, nil +} + +func (d decoder) parseString(b []byte) ([]byte, []byte, Kind, error) { + if len(b) < 2 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + if b[0] != '"' { + return nil, b, Undefined, syntaxError(b, "expected '\"' at the beginning of a string value") + } + + var n int + if len(b) >= 9 { + // This is an optimization for short strings. We read 8/16 bytes, + // and XOR each with 0x22 (") so that these bytes (and only + // these bytes) are now zero. We use the hasless(u,1) trick + // from https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord + // to determine whether any bytes are zero. Finally, we CTZ + // to find the index of that byte. 
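+		//
+		// A worked example (illustrative): for the input `"abc"...`, the
+		// 8-byte chunk loaded from b[1:] is `abc"` followed by whatever
+		// comes next. XOR with 0x22 zeroes only the byte holding the
+		// closing '"'; (u - 0x0101...) & ^u & 0x8080... then sets the MSB of
+		// exactly that zero byte, and TrailingZeros64/8 recovers its index
+		// (3 here), so n = 3 + 2 = 5 and b[:5] is the quoted string.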
+ const mask1 = 0x2222222222222222 + const mask2 = 0x0101010101010101 + const mask3 = 0x8080808080808080 + u := binary.LittleEndian.Uint64(b[1:]) ^ mask1 + if mask := (u - mask2) & ^u & mask3; mask != 0 { + n = bits.TrailingZeros64(mask)/8 + 2 + goto found + } + if len(b) >= 17 { + u = binary.LittleEndian.Uint64(b[9:]) ^ mask1 + if mask := (u - mask2) & ^u & mask3; mask != 0 { + n = bits.TrailingZeros64(mask)/8 + 10 + goto found + } + } + } + n = bytes.IndexByte(b[1:], '"') + 2 + if n <= 1 { + return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value") + } +found: + if (d.flags.has(noBackslash) || bytes.IndexByte(b[1:n], '\\') < 0) && + (d.flags.has(validAsciiPrint) || ascii.ValidPrint(b[1:n])) { + return b[:n], b[n:], Unescaped, nil + } + + for i := 1; i < len(b); i++ { + switch b[i] { + case '\\': + if i++; i < len(b) { + switch b[i] { + case '"', '\\', '/', 'n', 'r', 't', 'f', 'b': + case 'u': + _, n, err := d.parseUnicode(b[i+1:]) + if err != nil { + return nil, b[i+1+n:], Undefined, err + } + i += n + default: + return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i]) + } + } + + case '"': + return b[:i+1], b[i+1:], String, nil + + default: + if b[i] < 0x20 { + return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i]) + } + } + } + + return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value") +} + +func (d decoder) parseStringUnquote(b []byte, r []byte) ([]byte, []byte, bool, error) { + s, b, k, err := d.parseString(b) + if err != nil { + return s, b, false, err + } + + s = s[1 : len(s)-1] // trim the quotes + + if k == Unescaped { + return s, b, false, nil + } + + if r == nil { + r = make([]byte, 0, len(s)) + } + + for len(s) != 0 { + i := bytes.IndexByte(s, '\\') + + if i < 0 { + r = appendCoerceInvalidUTF8(r, s) + break + } + + r = appendCoerceInvalidUTF8(r, s[:i]) + s = s[i+1:] + + c := s[0] + switch c { + case '"', '\\', '/': + // simple escaped character + case 'n': + c = '\n' + + case 'r': + c = '\r' + + case 't': + c = '\t' + + case 'b': + c = '\b' + + case 'f': + c = '\f' + + case 'u': + s = s[1:] + + r1, n1, err := d.parseUnicode(s) + if err != nil { + return r, b, true, err + } + s = s[n1:] + + if utf16.IsSurrogate(r1) { + if !hasPrefix(s, `\u`) { + r1 = unicode.ReplacementChar + } else { + r2, n2, err := d.parseUnicode(s[2:]) + if err != nil { + return r, b, true, err + } + if r1 = utf16.DecodeRune(r1, r2); r1 != unicode.ReplacementChar { + s = s[2+n2:] + } + } + } + + r = appendRune(r, r1) + continue + + default: // not sure what this escape sequence is + return r, b, false, syntaxError(s, "invalid character '%c' in string escape code", c) + } + + r = append(r, c) + s = s[1:] + } + + return r, b, true, nil +} + +func appendRune(b []byte, r rune) []byte { + n := len(b) + b = append(b, 0, 0, 0, 0) + return b[:n+utf8.EncodeRune(b[n:], r)] +} + +func appendCoerceInvalidUTF8(b []byte, s []byte) []byte { + c := [4]byte{} + + for _, r := range string(s) { + b = append(b, c[:utf8.EncodeRune(c[:], r)]...) 
+ } + + return b +} + +func (d decoder) parseObject(b []byte) ([]byte, []byte, Kind, error) { + if len(b) < 2 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + + if b[0] != '{' { + return nil, b, Undefined, syntaxError(b, "expected '{' at the beginning of an object value") + } + + var err error + var a = b + var n = len(b) + var i = 0 + + b = b[1:] + for { + b = skipSpaces(b) + + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "cannot decode object from empty input") + } + + if b[0] == '}' { + j := (n - len(b)) + 1 + return a[:j], a[j:], Object, nil + } + + if i != 0 { + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field value") + } + if b[0] != ',' { + return nil, b, Undefined, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + if len(b) == 0 { + return nil, b, Undefined, unexpectedEOF(b) + } + if b[0] == '}' { + return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field") + } + } + + _, b, _, err = d.parseString(b) + if err != nil { + return nil, b, Undefined, err + } + b = skipSpaces(b) + + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field key") + } + if b[0] != ':' { + return nil, b, Undefined, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + _, b, _, err = d.parseValue(b) + if err != nil { + return nil, b, Undefined, err + } + + i++ + } +} + +func (d decoder) parseArray(b []byte) ([]byte, []byte, Kind, error) { + if len(b) < 2 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + + if b[0] != '[' { + return nil, b, Undefined, syntaxError(b, "expected '[' at the beginning of array value") + } + + var err error + var a = b + var n = len(b) + var i = 0 + + b = b[1:] + for { + b = skipSpaces(b) + + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "missing closing ']' after array value") + } + + if b[0] == ']' { + j := (n - len(b)) + 1 + return a[:j], a[j:], Array, nil + } + + if i != 0 { + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected EOF after array element") + } + if b[0] != ',' { + return nil, b, Undefined, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + if len(b) == 0 { + return nil, b, Undefined, unexpectedEOF(b) + } + if b[0] == ']' { + return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field") + } + } + + _, b, _, err = d.parseValue(b) + if err != nil { + return nil, b, Undefined, err + } + + i++ + } +} + +func (d decoder) parseValue(b []byte) ([]byte, []byte, Kind, error) { + if len(b) == 0 { + return nil, b, Undefined, syntaxError(b, "unexpected end of JSON input") + } + + var v []byte + var k Kind + var err error + + switch b[0] { + case '{': + v, b, k, err = d.parseObject(b) + case '[': + k = Array + v, b, k, err = d.parseArray(b) + case '"': + v, b, k, err = d.parseString(b) + case 'n': + v, b, k, err = d.parseNull(b) + case 't': + v, b, k, err = d.parseTrue(b) + case 'f': + v, b, k, err = d.parseFalse(b) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v, b, k, err = d.parseNumber(b) + default: + err = syntaxError(b, "invalid character '%c' looking for beginning of value", b[0]) + } + + return v, b, k, err +} + +func hasNullPrefix(b []byte) bool { + return len(b) >= 4 && string(b[:4]) == "null" +} + +func hasTruePrefix(b []byte) bool { + return len(b) >= 4 && string(b[:4]) == "true" 
+} + +func hasFalsePrefix(b []byte) bool { + return len(b) >= 5 && string(b[:5]) == "false" +} + +func hasPrefix(b []byte, s string) bool { + return len(b) >= len(s) && s == string(b[:len(s)]) +} + +func hasLeadingSign(b []byte) bool { + return len(b) > 0 && (b[0] == '+' || b[0] == '-') +} + +func hasLeadingZeroes(b []byte) bool { + if hasLeadingSign(b) { + b = b[1:] + } + return len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' +} + +func appendToLower(b, s []byte) []byte { + if ascii.Valid(s) { // fast path for ascii strings + i := 0 + + for j := range s { + c := s[j] + + if 'A' <= c && c <= 'Z' { + b = append(b, s[i:j]...) + b = append(b, c+('a'-'A')) + i = j + 1 + } + } + + return append(b, s[i:]...) + } + + for _, r := range string(s) { + b = appendRune(b, foldRune(r)) + } + + return b +} + +func foldRune(r rune) rune { + if r = unicode.SimpleFold(r); 'A' <= r && r <= 'Z' { + r = r + ('a' - 'A') + } + return r +} diff --git a/vendor/github.com/segmentio/encoding/json/reflect.go b/vendor/github.com/segmentio/encoding/json/reflect.go new file mode 100644 index 00000000000..3a5c6f12179 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/reflect.go @@ -0,0 +1,20 @@ +//go:build go1.18 +// +build go1.18 + +package json + +import ( + "reflect" + "unsafe" +) + +func extendSlice(t reflect.Type, s *slice, n int) slice { + arrayType := reflect.ArrayOf(n, t.Elem()) + arrayData := reflect.New(arrayType) + reflect.Copy(arrayData.Elem(), reflect.NewAt(t, unsafe.Pointer(s)).Elem()) + return slice{ + data: unsafe.Pointer(arrayData.Pointer()), + len: s.len, + cap: n, + } +} diff --git a/vendor/github.com/segmentio/encoding/json/reflect_optimize.go b/vendor/github.com/segmentio/encoding/json/reflect_optimize.go new file mode 100644 index 00000000000..6936cefe24d --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/reflect_optimize.go @@ -0,0 +1,30 @@ +//go:build !go1.18 +// +build !go1.18 + +package json + +import ( + "reflect" + "unsafe" +) + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer + +//go:linkname typedslicecopy reflect.typedslicecopy +//go:noescape +func typedslicecopy(elemType unsafe.Pointer, dst, src slice) int + +func extendSlice(t reflect.Type, s *slice, n int) slice { + elemTypeRef := t.Elem() + elemTypePtr := ((*iface)(unsafe.Pointer(&elemTypeRef))).ptr + + d := slice{ + data: unsafe_NewArray(elemTypePtr, n), + len: s.len, + cap: n, + } + + typedslicecopy(elemTypePtr, d, *s) + return d +} diff --git a/vendor/github.com/segmentio/encoding/json/string.go b/vendor/github.com/segmentio/encoding/json/string.go new file mode 100644 index 00000000000..dba5c5d3618 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/string.go @@ -0,0 +1,70 @@ +package json + +import ( + "math/bits" + "unsafe" +) + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +// escapeIndex finds the index of the first char in `s` that requires escaping. +// A char requires escaping if it's outside of the range of [0x20, 0x7F] or if +// it includes a double quote or backslash. If the escapeHTML mode is enabled, +// the chars <, > and & also require escaping. If no chars in `s` require +// escaping, the return value is -1. +func escapeIndex(s string, escapeHTML bool) int { + chunks := stringToUint64(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. 
the byte was outside the ASCII range).
+		mask := n | below(n, 0x20) | contains(n, '"') | contains(n, '\\')
+		if escapeHTML {
+			mask |= contains(n, '<') | contains(n, '>') | contains(n, '&')
+		}
+		if (mask & msb) != 0 {
+			return bits.TrailingZeros64(mask&msb) / 8
+		}
+	}
+
+	for i := len(chunks) * 8; i < len(s); i++ {
+		c := s[i]
+		if c < 0x20 || c > 0x7f || c == '"' || c == '\\' || (escapeHTML && (c == '<' || c == '>' || c == '&')) {
+			return i
+		}
+	}
+
+	return -1
+}
+
+// below returns a mask that can be used to determine if any of the bytes
+// in `n` are below `b`. If a byte's MSB is set in the mask then that byte was
+// below `b`. The result is only valid if `b`, and each byte in `n`, is below
+// 0x80.
+func below(n uint64, b byte) uint64 {
+	return n - expand(b)
+}
+
+// contains returns a mask that can be used to determine if any of the
+// bytes in `n` are equal to `b`. If a byte's MSB is set in the mask then
+// that byte is equal to `b`. The result is only valid if `b`, and each
+// byte in `n`, is below 0x80.
+func contains(n uint64, b byte) uint64 {
+	return (n ^ expand(b)) - lsb
+}
+
+// expand puts the specified byte into each of the 8 bytes of a uint64.
+func expand(b byte) uint64 {
+	return lsb * uint64(b)
+}
+
+func stringToUint64(s string) []uint64 {
+	return *(*[]uint64)(unsafe.Pointer(&sliceHeader{
+		Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
+		Len:  len(s) / 8,
+		Cap:  len(s) / 8,
+	}))
+}
diff --git a/vendor/github.com/segmentio/encoding/json/token.go b/vendor/github.com/segmentio/encoding/json/token.go
new file mode 100644
index 00000000000..652e36d76fa
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/json/token.go
@@ -0,0 +1,416 @@
+package json
+
+import (
+	"strconv"
+	"sync"
+	"unsafe"
+)
+
+// Tokenizer is an iterator-style type which can be used to progressively parse
+// through a json input.
+//
+// Tokenizing json is useful to build highly efficient parsing operations, for
+// example when doing transformations on-the-fly as the program reads the
+// input and produces the transformed json to an output buffer.
+//
+// Here is a common pattern to use a tokenizer:
+//
+//	for t := json.NewTokenizer(b); t.Next(); {
+//		switch k := t.Kind(); k.Class() {
+//		case json.Null:
+//			...
+//		case json.Bool:
+//			...
+//		case json.Num:
+//			...
+//		case json.String:
+//			...
+//		case json.Array:
+//			...
+//		case json.Object:
+//			...
+//		}
+//	}
+//
+type Tokenizer struct {
+	// When the tokenizer is positioned on a json delimiter this field is not
+	// zero. In this case the possible values are '{', '}', '[', ']', ':', and
+	// ','.
+	Delim Delim
+
+	// This field contains the raw json token that the tokenizer is pointing at.
+	// When Delim is not zero, this field is a single-element byte slice
+	// containing the delimiter value. Otherwise, this field holds values like
+	// null, true, false, numbers, or quoted strings.
+	Value RawValue
+
+	// When the tokenizer has encountered invalid content this field is not nil.
+	Err error
+
+	// When the value is in an array or an object, this field contains the depth
+	// at which it was found.
+	Depth int
+
+	// When the value is in an array or an object, this field contains the
+	// position at which it was found.
+	Index int
+
+	// This field is true when the value is the key of an object.
+	IsKey bool
+
+	// Tells whether the next value read from the tokenizer is a key.
+	isKey bool
+
+	// json input for the tokenizer, pointing at data right after the last token
+	// that was parsed.
+	json []byte
+
+	// Stack used to track entering and leaving arrays, objects, and keys.
+	stack *stack
+
+	// Decoder used for parsing.
+	decoder
+}
+
+// NewTokenizer constructs a new Tokenizer which reads its json input from b.
+func NewTokenizer(b []byte) *Tokenizer {
+	return &Tokenizer{
+		json:    b,
+		decoder: decoder{flags: internalParseFlags(b)},
+	}
+}
+
+// Reset erases the state of t and re-initializes it with the json input from b.
+func (t *Tokenizer) Reset(b []byte) {
+	if t.stack != nil {
+		releaseStack(t.stack)
+	}
+	// This code is similar to:
+	//
+	//	*t = Tokenizer{json: b}
+	//
+	// However, it does not compile down to an invocation of duff-copy.
+	t.Delim = 0
+	t.Value = nil
+	t.Err = nil
+	t.Depth = 0
+	t.Index = 0
+	t.IsKey = false
+	t.isKey = false
+	t.json = b
+	t.stack = nil
+	t.decoder = decoder{flags: internalParseFlags(b)}
+}
+
+// Next advances the tokenizer to the next token and returns true; it returns
+// false once the end of the json input has been reached.
+//
+// If the tokenizer encounters malformed json while reading the input the method
+// sets t.Err to an error describing the issue, and returns false. Once an error
+// has been encountered, the tokenizer will always fail until its input is
+// cleared by a call to its Reset method.
+func (t *Tokenizer) Next() bool {
+	if t.Err != nil {
+		return false
+	}
+
+	// Inlined code of the skipSpaces function, this gives a ~15% speed boost.
+	i := 0
+skipLoop:
+	for _, c := range t.json {
+		switch c {
+		case sp, ht, nl, cr:
+			i++
+		default:
+			break skipLoop
+		}
+	}
+
+	if i > 0 {
+		t.json = t.json[i:]
+	}
+
+	if len(t.json) == 0 {
+		t.Reset(nil)
+		return false
+	}
+
+	var kind Kind
+	switch t.json[0] {
+	case '"':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseString(t.json)
+	case 'n':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseNull(t.json)
+	case 't':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseTrue(t.json)
+	case 'f':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseFalse(t.json)
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseNumber(t.json)
+	case '{', '}', '[', ']', ':', ',':
+		t.Delim, t.Value, t.json = Delim(t.json[0]), t.json[:1], t.json[1:]
+		switch t.Delim {
+		case '{':
+			kind = Object
+		case '[':
+			kind = Array
+		}
+	default:
+		t.Delim = 0
+		t.Value, t.json, t.Err = t.json[:1], t.json[1:], syntaxError(t.json, "expected token but found '%c'", t.json[0])
+	}
+
+	t.Depth = t.depth()
+	t.Index = t.index()
+	t.flags = t.flags.withKind(kind)
+
+	if t.Delim == 0 {
+		t.IsKey = t.isKey
+	} else {
+		t.IsKey = false
+
+		switch t.Delim {
+		case '{':
+			t.isKey = true
+			t.push(inObject)
+		case '[':
+			t.push(inArray)
+		case '}':
+			t.Err = t.pop(inObject)
+			t.Depth--
+			t.Index = t.index()
+		case ']':
+			t.Err = t.pop(inArray)
+			t.Depth--
+			t.Index = t.index()
+		case ':':
+			t.isKey = false
+		case ',':
+			if t.stack == nil || len(t.stack.state) == 0 {
+				t.Err = syntaxError(t.json, "found unexpected comma")
+				return false
+			}
+			if t.stack.is(inObject) {
+				t.isKey = true
+			}
+			t.stack.state[len(t.stack.state)-1].len++
+		}
+	}
+
+	return (t.Delim != 0 || len(t.Value) != 0) && t.Err == nil
+}
+
+func (t *Tokenizer) depth() int {
+	if t.stack == nil {
+		return 0
+	}
+	return t.stack.depth()
+}
+
+func (t *Tokenizer) index() int {
+	if t.stack == nil {
+		return 0
+	}
+	return t.stack.index()
+}
+
+func (t *Tokenizer) push(typ scope) {
+	if t.stack == nil {
+		t.stack = acquireStack()
+	}
+	t.stack.push(typ)
+}
+
+func (t *Tokenizer) pop(expect scope) error {
+	if t.stack == nil || !t.stack.pop(expect) {
+		return syntaxError(t.json, "found unexpected character while tokenizing json input")
+	}
+	return nil
+}
+
+// Kind returns the kind of the value that the tokenizer is currently positioned
+// on.
+func (t *Tokenizer) Kind() Kind { return t.flags.kind() }
+
+// Bool returns a bool containing the value of the json boolean that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on a boolean, the behavior is undefined.
+func (t *Tokenizer) Bool() bool { return t.flags.kind() == True }
+
+// Int returns the signed integer value of the json number that the tokenizer
+// is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on an integer, the behavior is undefined.
+func (t *Tokenizer) Int() int64 {
+	i, _, _ := t.parseInt(t.Value, int64Type)
+	return i
+}
+
+// Uint returns the unsigned integer value of the json number that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on a positive integer, the behavior is
+// undefined.
+func (t *Tokenizer) Uint() uint64 {
+	u, _, _ := t.parseUint(t.Value, uint64Type)
+	return u
+}
+
+// Float returns the floating-point value of the json number that the tokenizer
+// is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on a number, the behavior is undefined.
+func (t *Tokenizer) Float() float64 {
+	f, _ := strconv.ParseFloat(*(*string)(unsafe.Pointer(&t.Value)), 64)
+	return f
+}
+
+// String returns a byte slice containing the value of the json string that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// When possible, the returned byte slice references the backing array of the
+// tokenizer. A new slice is only allocated if the tokenizer needed to unescape
+// the json string.
+//
+// If the tokenizer is not positioned on a string, the behavior is undefined.
+func (t *Tokenizer) String() []byte {
+	if t.flags.kind() == Unescaped && len(t.Value) > 1 {
+		return t.Value[1 : len(t.Value)-1] // unquote
+	}
+	s, _, _, _ := t.parseStringUnquote(t.Value, nil)
+	return s
+}
+
+// RawValue represents a raw json value; it is intended to carry null, true,
+// false, number, and string values only.
+type RawValue []byte
+
+// String returns true if v contains a string value.
+func (v RawValue) String() bool { return len(v) != 0 && v[0] == '"' }
+
+// Null returns true if v contains a null value.
+func (v RawValue) Null() bool { return len(v) != 0 && v[0] == 'n' }
+
+// True returns true if v contains a true value.
+func (v RawValue) True() bool { return len(v) != 0 && v[0] == 't' }
+
+// False returns true if v contains a false value.
+func (v RawValue) False() bool { return len(v) != 0 && v[0] == 'f' }
+
+// Number returns true if v contains a number value.
+func (v RawValue) Number() bool { + if len(v) != 0 { + switch v[0] { + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + } + return false +} + +// AppendUnquote writes the unquoted version of the string value in v into b. +func (v RawValue) AppendUnquote(b []byte) []byte { + d := decoder{} + s, r, _, err := d.parseStringUnquote(v, b) + if err != nil { + panic(err) + } + if len(r) != 0 { + panic(syntaxError(r, "unexpected trailing tokens after json value")) + } + return append(b, s...) +} + +// Unquote returns the unquoted version of the string value in v. +func (v RawValue) Unquote() []byte { + return v.AppendUnquote(nil) +} + +type scope int + +const ( + inArray scope = iota + inObject +) + +type state struct { + typ scope + len int +} + +type stack struct { + state []state +} + +func (s *stack) push(typ scope) { + s.state = append(s.state, state{typ: typ, len: 1}) +} + +func (s *stack) pop(expect scope) bool { + i := len(s.state) - 1 + + if i < 0 { + return false + } + + if found := s.state[i]; expect != found.typ { + return false + } + + s.state = s.state[:i] + return true +} + +func (s *stack) is(typ scope) bool { + return len(s.state) != 0 && s.state[len(s.state)-1].typ == typ +} + +func (s *stack) depth() int { + return len(s.state) +} + +func (s *stack) index() int { + if len(s.state) == 0 { + return 0 + } + return s.state[len(s.state)-1].len - 1 +} + +func acquireStack() *stack { + s, _ := stackPool.Get().(*stack) + if s == nil { + s = &stack{state: make([]state, 0, 4)} + } else { + s.state = s.state[:0] + } + return s +} + +func releaseStack(s *stack) { + stackPool.Put(s) +} + +var ( + stackPool sync.Pool // *stack +) diff --git a/vendor/go.lsp.dev/jsonrpc2/.codecov.yml b/vendor/go.lsp.dev/jsonrpc2/.codecov.yml new file mode 100644 index 00000000000..95e428957d4 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.codecov.yml @@ -0,0 +1,38 @@ +codecov: + allow_coverage_offsets: true + notify: + wait_for_ci: false + +coverage: + precision: 1 + round: down + range: "70...100" + + status: + project: + default: + target: auto + threshold: 1% + if_ci_failed: error + if_not_found: success + patch: + default: + only_pulls: true + target: 50% + threshold: 10% + if_ci_failed: error + if_not_found: failure + changes: + default: + if_ci_failed: error + if_not_found: success + only_pulls: false + branches: + - main + +comment: + behavior: default + show_carryforward_flags: true + +github_checks: + annotations: true diff --git a/vendor/go.lsp.dev/jsonrpc2/.errcheckignore b/vendor/go.lsp.dev/jsonrpc2/.errcheckignore new file mode 100644 index 00000000000..7fda6d3a3eb --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.errcheckignore @@ -0,0 +1 @@ +(*go.lsp.dev/jsonrpc2.Request).Reply diff --git a/vendor/go.lsp.dev/jsonrpc2/.gitattributes b/vendor/go.lsp.dev/jsonrpc2/.gitattributes new file mode 100644 index 00000000000..169fc0a41f7 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.gitattributes @@ -0,0 +1,11 @@ +# go.lsp.dev/jsonrpc2 project gitattributes file +# https://github.com/github/linguist#using-gitattributes +# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml + +# To prevent CRLF breakages on Windows for fragile files, like testdata. 
+* -text + +docs/ linguist-documentation +*.pb.go linguist-generated +*_gen.go linguist-generated +*_string.go linguist-generated diff --git a/vendor/go.lsp.dev/jsonrpc2/.gitignore b/vendor/go.lsp.dev/jsonrpc2/.gitignore new file mode 100644 index 00000000000..6211ff933bd --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.gitignore @@ -0,0 +1,52 @@ +# go.lsp.dev/jsonrpc2 project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +# please do not open a pull request to add something created by your editor or tools + +# github/gitignore/Go.gitignore +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +# cgo generated +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +# test generated +_testmain.go + +# profile +*.pprof + +# coverage +coverage.* + +# tools +bin/ diff --git a/vendor/go.lsp.dev/jsonrpc2/.golangci.yml b/vendor/go.lsp.dev/jsonrpc2/.golangci.yml new file mode 100644 index 00000000000..c363c1835c2 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.golangci.yml @@ -0,0 +1,200 @@ +run: + timeout: 5m + issues-exit-code: 1 + tests: true + skip-dirs: [] + skip-dirs-use-default: true + skip-files: [] + allow-parallel-runners: true + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true + uniq-by-line: true + sort-results: true + +linters-settings: + dupl: + threshold: 100 + # errcheck: + # check-type-assertions: true + # check-blank: true + # exclude: .errcheckignore + funlen: + lines: 100 + statements: 60 + gocognit: + min-complexity: 20 + goconst: + min-len: 3 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - whyNoLint + settings: + hugeParam: + sizeThreshold: 80 + rangeExprCopy: + sizeThreshold: 512 + rangeValCopy: + sizeThreshold: 128 + gocyclo: + min-complexity: 15 + godot: + scope: declarations + capital: false + gofmt: + simplify: true + goimports: + local-prefixes: go.lsp.dev/jsonrpc2 + golint: + min-confidence: 0.3 + govet: + enable-all: true + check-shadowing: true + disable: + - fieldalignment + depguard: + list-type: blacklist + include-go-root: true + # packages-with-error-message: + # - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" + lll: + line-length: 120 + tab-width: 1 + maligned: + suggest-new: true + misspell: + locale: US + ignore-words: + - cancelled + nakedret: + max-func-lines: 30 + prealloc: + simple: true + range-loops: true + for-loops: true + testpackage: + skip-regexp: '.*(export)_test\.go' + unparam: + check-exported: true + algo: cha + unused: + check-exported: false + whitespace: + multi-if: true + multi-func: true + +linters: + fast: false + disabled: + - deadcode # Finds unused code + - errcheck # Errcheck is a program for checking for unchecked errors in go programs + - exhaustivestruct # Checks if all struct's fields are initialized + - forbidigo # Forbids identifiers + - gci # Gci control golang package import order and make it always 
deterministic
+    - gochecknoglobals # check that no global variables exist
+    - gochecknoinits # Checks that no init functions are present in Go code
+    - godox # Tool for detection of FIXME, TODO and other comment keywords
+    - goerr113 # Golang linter to check the errors handling expressions
+    - gofumpt # Gofumpt checks whether code was gofumpt-ed
+    - goheader # Checks is file header matches to pattern
+    - golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
+    - gomnd # An analyzer to detect magic numbers
+    - gomodguard # Allow and block list linter for direct Go module dependencies
+    - gosec # Inspects source code for security problems
+    - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
+    - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
+    - scopelint # Scopelint checks for unpinned variables in go programs
+    - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed
+    - unparam # Reports unused function parameters
+    - wrapcheck # Checks that errors returned from external packages are wrapped TODO(zchee): enable
+    - wsl # Whitespace Linter
+  enable:
+    - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
+    - bodyclose # checks whether HTTP response body is closed successfully
+    - depguard # Go linter that checks if package imports are in a list of acceptable packages
+    - dogsled # Checks assignments with too many blank identifiers
+    - dupl # Tool for code clone detection
+    - errorlint # source code linter for Go software that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13
+    - exhaustive # check exhaustiveness of enum switch statements
+    - exportloopref # checks for pointers to enclosing loop variables
+    - funlen # Tool for detection of long functions
+    - gocognit # Computes and checks the cognitive complexity of functions
+    - goconst # Finds repeated strings that could be replaced by a constant
+    - gocritic # The most opinionated Go source code linter
+    - gocyclo # Computes and checks the cyclomatic complexity of functions
+    - godot # Check if comments end in a period
+    - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
+    - goimports # Goimports does everything that gofmt does.
Additionally it checks unused imports + - goprintffuncname # Checks that printf-like functions are named with `f` at the end + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ifshort # Checks that your code uses short syntax for if-statements whenever possible + - ineffassign # Detects when assignments to existing variables are not used + - lll # Reports long lines + - makezero # Finds slice declarations with non-zero initial length + - misspell # Finds commonly misspelled English words in comments + - nakedret # Finds naked returns in functions greater than a specified function length + - nestif # Reports deeply nested if statements + - noctx # noctx finds sending http request without context.Context + - nolintlint # Reports ill-formed or insufficient nolint directives + - prealloc # Finds slice declarations that could potentially be preallocated + - predeclared # find code that shadows one of Go's predeclared identifiers + - rowserrcheck # checks whether Err of rows is checked successfully + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - structcheck # Finds unused struct fields + - stylecheck # Stylecheck is a replacement for golint + - testpackage # linter that makes you use a separate _test package + - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unconvert # Remove unnecessary type conversions + - unused # Checks Go code for unused constants, variables, functions and types + - varcheck # Finds unused global variables and constants + - whitespace # Tool for detection of leading and trailing whitespace + +issues: + max-same-issues: 0 + exclude-use-default: true + exclude-rules: + - path: _test\.go + linters: + - errcheck + - funlen + - gocognit + - goconst + - gocyclo + - lll + - maligned + - wrapcheck + - path: "(.*)?_example_test.go" + linters: + - gocritic + # `TestMain` function is no longer required to call `os.Exit` since Go 1.15. + # ref: https://golang.org/doc/go1.15#testing + - text: "SA3000:" + linters: + - staticcheck + # Exclude shadow checking on the variable named err + - text: "shadow: declaration of \"(err|ok)\"" + linters: + - govet + # fake implements + - path: fake/fake.go + linters: + - errcheck + # future use + - path: wire.go + text: "`(codeServerErrorStart|codeServerErrorEnd)` is unused" + # goroutine + - path: handler.go + text: "Error return value of `handler` is not checked" diff --git a/vendor/go.lsp.dev/jsonrpc2/LICENSE b/vendor/go.lsp.dev/jsonrpc2/LICENSE new file mode 100644 index 00000000000..e8748709cfb --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, The Go Language Server Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.lsp.dev/jsonrpc2/Makefile b/vendor/go.lsp.dev/jsonrpc2/Makefile new file mode 100644 index 00000000000..a09304bf4b6 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/Makefile @@ -0,0 +1,129 @@ +# ----------------------------------------------------------------------------- +# global + +.DEFAULT_GOAL := test +comma := , +empty := +space := $(empty) $(empty) + +# ----------------------------------------------------------------------------- +# go + +GO_PATH ?= $(shell go env GOPATH) +GO_OS ?= $(shell go env GOOS) +GO_ARCH ?= $(shell go env GOARCH) + +PKG := $(subst $(GO_PATH)/src/,,$(CURDIR)) +CGO_ENABLED ?= 0 +GO_BUILDTAGS=osusergo netgo static +GO_LDFLAGS=-s -w "-extldflags=-static" +GO_FLAGS ?= -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' -ldflags='${GO_LDFLAGS}' -installsuffix=netgo + +GO_PKGS := $(shell go list ./...) +GO_TEST ?= ${TOOLS_BIN}/gotestsum -- +GO_TEST_PKGS ?= $(shell go list -f='{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./...) +GO_TEST_FLAGS ?= -race -count=1 +GO_TEST_FUNC ?= . +GO_COVERAGE_OUT ?= coverage.out +GO_BENCH_FLAGS ?= -benchmem +GO_BENCH_FUNC ?= . +GO_LINT_FLAGS ?= + +TOOLS := $(shell cd tools; go list -f '{{ join .Imports " " }}' -tags=tools) +TOOLS_BIN := ${CURDIR}/tools/bin + +# Set build environment +JOBS := $(shell getconf _NPROCESSORS_CONF) + +# ----------------------------------------------------------------------------- +# defines + +define target +@printf "+ $(patsubst ,$@,$(1))\\n" >&2 +endef + +# ----------------------------------------------------------------------------- +# target + +##@ test, bench, coverage + +export GOTESTSUM_FORMAT=standard-verbose + +.PHONY: test +test: CGO_ENABLED=1 +test: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})' +test: tools/bin/gotestsum ## Runs package test including race condition. + $(call target) + @CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -run=${GO_TEST_FUNC} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS} + +.PHONY: bench +bench: ## Take a package benchmark. + $(call target) + @CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} -run='^$$' -bench=${GO_BENCH_FUNC} ${GO_BENCH_FLAGS} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS} + +.PHONY: coverage +coverage: CGO_ENABLED=1 +coverage: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})' +coverage: tools/bin/gotestsum ## Takes packages test coverage. 
+	$(call target)
+	@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -covermode=atomic -coverpkg=${PKG}/... -coverprofile=${GO_COVERAGE_OUT} $(strip ${GO_FLAGS}) ${GO_PKGS}
+
+
+##@ fmt, lint
+
+.PHONY: lint
+lint: fmt lint/golangci-lint ## Run all linters.
+
+.PHONY: fmt
+fmt: tools/bin/goimportz tools/bin/gofumpt ## Run goimportz and gofumpt.
+	$(call target)
+	find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/goimportz -local=${PKG},$(subst /jsonrpc2,,$(PKG)) -w
+	find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/gofumpt -extra -w
+
+.PHONY: lint/golangci-lint
+lint/golangci-lint: tools/bin/golangci-lint .golangci.yml ## Run golangci-lint.
+	$(call target)
+	${TOOLS_BIN}/golangci-lint -j ${JOBS} run $(strip ${GO_LINT_FLAGS}) ./...
+
+
+##@ tools
+
+.PHONY: tools
+tools: tools/bin/'' ## Install tools
+
+tools/%: tools/bin/% ## install an individual dependent tool
+
+tools/bin/%: ${CURDIR}/tools/go.mod ${CURDIR}/tools/go.sum
+	@cd tools; \
+	for t in ${TOOLS}; do \
+		if [ -z '$*' ] || [ $$(basename $$t) = '$*' ]; then \
+			echo "Install $$t ..."; \
+			GOBIN=${TOOLS_BIN} CGO_ENABLED=0 go install -v -mod=mod ${GO_FLAGS} "$${t}"; \
+		fi \
+	done
+
+
+##@ clean
+
+.PHONY: clean
+clean: ## Cleans up binaries and extra files in the package.
+	$(call target)
+	@rm -rf *.out *.test *.prof trace.txt ${TOOLS_BIN}
+
+
+##@ miscellaneous
+
+.PHONY: todo
+TODO: ## Print all (TODO|BUG|XXX|FIXME|NOTE) comments in packages.
+	@grep -E '(TODO|BUG|XXX|FIXME)(\(.+\):|:)' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*')
+
+.PHONY: env/%
+env/%: ## Print the value of MAKEFILE_VARIABLE. Use `make env/GO_FLAGS`, etc.
+	@echo $($*)
+
+
+##@ help
+
+.PHONY: help
+help: ## Show this help.
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[33m<target>\033[0m\n"} /^[a-zA-Z_0-9\/%_-]+:.*?##/ { printf " \033[1;32m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
diff --git a/vendor/go.lsp.dev/jsonrpc2/README.md b/vendor/go.lsp.dev/jsonrpc2/README.md
new file mode 100644
index 00000000000..373befc736d
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/README.md
@@ -0,0 +1,19 @@
+# jsonrpc2
+
+[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga]
+
+Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go.
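+
+A minimal usage sketch (illustrative only; it assumes a `Stream` built over an
+`io.ReadWriteCloser` such as a `net.Conn`, for example via a `NewStream`
+helper; see the package documentation for the exact constructors):
+
+```go
+// rwc is a placeholder for any io.ReadWriteCloser, e.g. a net.Conn.
+conn := jsonrpc2.NewConn(jsonrpc2.NewStream(rwc))
+
+// Serve incoming requests; ReplyHandler enforces exactly one reply per request.
+conn.Go(ctx, jsonrpc2.ReplyHandler(func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+	if req.Method() == "ping" {
+		return reply(ctx, "pong", nil)
+	}
+	return jsonrpc2.MethodNotFoundHandler(ctx, reply, req)
+}))
+
+// Issue an outgoing call on the same connection.
+var result string
+if _, err := conn.Call(ctx, "ping", nil, &result); err != nil {
+	// handle the RPC error
+}
+```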
+ + + +[circleci]: https://app.circleci.com/pipelines/github/go-language-server/jsonrpc2 +[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/jsonrpc2 +[module]: https://github.com/go-language-server/jsonrpc2/releases/latest +[codecov]: https://codecov.io/gh/go-language-server/jsonrpc2 +[ga]: https://github.com/go-language-server/jsonrpc2 + +[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/jsonrpc2/main.svg?style=for-the-badge&label=CIRCLECI&logo=circleci +[pkg.go.dev-badge]: https://bit.ly/shields-io-pkg-go-dev +[module-badge]: https://img.shields.io/github/release/go-language-server/jsonrpc2.svg?color=00add8&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4= +[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/jsonrpc2/main?logo=codecov&style=for-the-badge +[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/jsonrpc2?useReferer&pixel diff --git a/vendor/go.lsp.dev/jsonrpc2/codes.go b/vendor/go.lsp.dev/jsonrpc2/codes.go new file mode 100644 index 00000000000..5da58ea3f64 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/codes.go @@ -0,0 +1,86 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package jsonrpc2 + +// Code is an error code as defined in the JSON-RPC spec. +type Code int32 + +// list of JSON-RPC error codes. +const ( + // ParseError is the invalid JSON was received by the server. + // An error occurred on the server while parsing the JSON text. + ParseError Code = -32700 + + // InvalidRequest is the JSON sent is not a valid Request object. 
+	InvalidRequest Code = -32600
+
+	// MethodNotFound is the method does not exist / is not available.
+	MethodNotFound Code = -32601
+
+	// InvalidParams is the invalid method parameter(s).
+	InvalidParams Code = -32602
+
+	// InternalError is the internal JSON-RPC error.
+	InternalError Code = -32603
+
+	// JSONRPCReservedErrorRangeStart is the start range of JSON RPC reserved error codes.
+	//
+	// It doesn't denote a real error code. No LSP error codes should
+	// be defined between the start and end range. For backwards
+	// compatibility the "ServerNotInitialized" and the "UnknownErrorCode"
+	// are left in the range.
+	//
+	// @since 3.16.0.
+	JSONRPCReservedErrorRangeStart Code = -32099
+
+	// CodeServerErrorStart reserved for implementation-defined server-errors.
+	//
+	// Deprecated: Use JSONRPCReservedErrorRangeStart instead.
+	CodeServerErrorStart = JSONRPCReservedErrorRangeStart
+
+	// ServerNotInitialized is the error of server not initialized.
+	ServerNotInitialized Code = -32002
+
+	// UnknownError should be used for all non-coded errors.
+	UnknownError Code = -32001
+
+	// JSONRPCReservedErrorRangeEnd is the end range of JSON RPC reserved error codes.
+	//
+	// It doesn't denote a real error code.
+	//
+	// @since 3.16.0.
+	JSONRPCReservedErrorRangeEnd Code = -32000
+
+	// CodeServerErrorEnd reserved for implementation-defined server-errors.
+	//
+	// Deprecated: Use JSONRPCReservedErrorRangeEnd instead.
+	CodeServerErrorEnd = JSONRPCReservedErrorRangeEnd
+)
+
+// This file contains the Go forms of the wire specification.
+//
+// See http://www.jsonrpc.org/specification for details.
+//
+// list of JSON-RPC errors.
+var (
+	// ErrUnknown should be used for all non-coded errors.
+	ErrUnknown = NewError(UnknownError, "JSON-RPC unknown error")
+
+	// ErrParse is used when invalid JSON was received by the server.
+	ErrParse = NewError(ParseError, "JSON-RPC parse error")
+
+	// ErrInvalidRequest is used when the JSON sent is not a valid Request object.
+	ErrInvalidRequest = NewError(InvalidRequest, "JSON-RPC invalid request")
+
+	// ErrMethodNotFound should be returned by the handler when the method does
+	// not exist / is not available.
+	ErrMethodNotFound = NewError(MethodNotFound, "JSON-RPC method not found")
+
+	// ErrInvalidParams should be returned by the handler when method
+	// parameter(s) were invalid.
+	ErrInvalidParams = NewError(InvalidParams, "JSON-RPC invalid params")
+
+	// ErrInternal is not currently returned but defined for completeness.
+	ErrInternal = NewError(InternalError, "JSON-RPC internal error")
+)
diff --git a/vendor/go.lsp.dev/jsonrpc2/conn.go b/vendor/go.lsp.dev/jsonrpc2/conn.go
new file mode 100644
index 00000000000..3b12819babe
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/conn.go
@@ -0,0 +1,245 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Conn is the common interface to jsonrpc clients and servers.
+//
+// Conn is bidirectional; it does not have a designated server or client end.
+// It manages the jsonrpc2 protocol, connecting responses back to their calls.
+type Conn interface {
+	// Call invokes the target method and waits for a response.
+	//
+	// The params will be marshaled to JSON before sending over the wire, and will
+	// be handed to the method invoked.
+	//
+	// The response will be unmarshaled from JSON into the result.
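+	//
+	// A hypothetical sketch: var out Result; id, err := conn.Call(ctx, "method", params, &out).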
+	//
+	// The id returned will be unique for this connection, and can be used for
+	// logging or tracking.
+	Call(ctx context.Context, method string, params, result interface{}) (ID, error)
+
+	// Notify invokes the target method but does not wait for a response.
+	//
+	// The params will be marshaled to JSON before sending over the wire, and will
+	// be handed to the method invoked.
+	Notify(ctx context.Context, method string, params interface{}) error
+
+	// Go starts a goroutine to handle the connection.
+	//
+	// It must be called exactly once for each Conn. It returns immediately.
+	// Callers must block on Done() to wait for the connection to shut down.
+	//
+	// This is a temporary measure; this should be started automatically in the
+	// future.
+	Go(ctx context.Context, handler Handler)
+
+	// Close closes the connection and its underlying stream.
+	//
+	// It does not wait for the close to complete; use the Done() channel for
+	// that.
+	Close() error
+
+	// Done returns a channel that will be closed when the processing goroutine
+	// has terminated, which will happen if Close() is called or an underlying
+	// stream is closed.
+	Done() <-chan struct{}
+
+	// Err returns an error if there was one from within the processing goroutine.
+	//
+	// If Err returns non-nil, the connection will be already closed or closing.
+	Err() error
+}
+
+type conn struct {
+	seq       int32      // access atomically
+	writeMu   sync.Mutex // protects writes to the stream
+	stream    Stream     // supplied stream
+	pendingMu sync.Mutex // protects the pending map
+	pending   map[ID]chan *Response // holds the pending response channel with the ID as the key.
+
+	done chan struct{} // closed when done
+	err  atomic.Value  // holds run error
+}
+
+// NewConn creates a new connection object around the supplied stream.
+func NewConn(s Stream) Conn {
+	conn := &conn{
+		stream:  s,
+		pending: make(map[ID]chan *Response),
+		done:    make(chan struct{}),
+	}
+	return conn
+}
+
+// Call implements Conn.
+func (c *conn) Call(ctx context.Context, method string, params, result interface{}) (id ID, err error) {
+	// generate a new request identifier
+	id = NewNumberID(atomic.AddInt32(&c.seq, 1))
+	call, err := NewCall(id, method, params)
+	if err != nil {
+		return id, fmt.Errorf("marshaling call parameters: %w", err)
+	}
+
+	// We have to add ourselves to the pending map before we send, otherwise we
+	// are racing the response. Also add a buffer to rchan, so that if we get a
+	// wire response between the time this call is cancelled and id is deleted
+	// from c.pending, the send to rchan will not block.
+	rchan := make(chan *Response, 1)
+
+	c.pendingMu.Lock()
+	c.pending[id] = rchan
+	c.pendingMu.Unlock()
+
+	defer func() {
+		c.pendingMu.Lock()
+		delete(c.pending, id)
+		c.pendingMu.Unlock()
+	}()
+
+	// now we are ready to send
+	_, err = c.write(ctx, call)
+	if err != nil {
+		// sending failed, we will never get a response, so don't leave it pending
+		return id, err
+	}
+
+	// now wait for the response
+	select {
+	case resp := <-rchan:
+		// is it an error response?
+		if resp.err != nil {
+			return id, resp.err
+		}
+
+		if result == nil || len(resp.result) == 0 {
+			return id, nil
+		}
+
+		dec := json.NewDecoder(bytes.NewReader(resp.result))
+		dec.ZeroCopy()
+		if err := dec.Decode(result); err != nil {
+			return id, fmt.Errorf("unmarshaling result: %w", err)
+		}
+
+		return id, nil
+
+	case <-ctx.Done():
+		return id, ctx.Err()
+	}
+}
+
+// Notify implements Conn.
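+// Notifications carry no id, so no response is expected or waited for.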
+func (c *conn) Notify(ctx context.Context, method string, params interface{}) (err error) {
+	notify, err := NewNotification(method, params)
+	if err != nil {
+		return fmt.Errorf("marshaling notify parameters: %w", err)
+	}
+
+	_, err = c.write(ctx, notify)
+
+	return err
+}
+
+func (c *conn) replier(req Message) Replier {
+	return func(ctx context.Context, result interface{}, err error) error {
+		call, ok := req.(*Call)
+		if !ok {
+			// request was a notify, no need to respond
+			return nil
+		}
+
+		response, err := NewResponse(call.id, result, err)
+		if err != nil {
+			return err
+		}
+
+		_, err = c.write(ctx, response)
+		if err != nil {
+			// TODO(iancottrell): if a stream write fails, we really need to shut down the whole stream
+			return err
+		}
+		return nil
+	}
+}
+
+func (c *conn) write(ctx context.Context, msg Message) (int64, error) {
+	c.writeMu.Lock()
+	n, err := c.stream.Write(ctx, msg)
+	c.writeMu.Unlock()
+	if err != nil {
+		return 0, fmt.Errorf("write to stream: %w", err)
+	}
+
+	return n, nil
+}
+
+// Go implements Conn.
+func (c *conn) Go(ctx context.Context, handler Handler) {
+	go c.run(ctx, handler)
+}
+
+func (c *conn) run(ctx context.Context, handler Handler) {
+	defer close(c.done)
+
+	for {
+		// get the next message
+		msg, _, err := c.stream.Read(ctx)
+		if err != nil {
+			// The stream failed, we cannot continue.
+			c.fail(err)
+			return
+		}
+
+		switch msg := msg.(type) {
+		case Request:
+			if err := handler(ctx, c.replier(msg), msg); err != nil {
+				c.fail(err)
+			}
+
+		case *Response:
+			// If method is not set, this should be a response, in which case we must
+			// have an id to send the response back to the caller.
+			c.pendingMu.Lock()
+			rchan, ok := c.pending[msg.id]
+			c.pendingMu.Unlock()
+			if ok {
+				rchan <- msg
+			}
+		}
+	}
+}
+
+// Close implements Conn.
+func (c *conn) Close() error {
+	return c.stream.Close()
+}
+
+// Done implements Conn.
+func (c *conn) Done() <-chan struct{} {
+	return c.done
+}
+
+// Err implements Conn.
+func (c *conn) Err() error {
+	if err := c.err.Load(); err != nil {
+		return err.(error)
+	}
+	return nil
+}
+
+// fail sets a failure condition on the stream and closes it.
+func (c *conn) fail(err error) {
+	c.err.Store(err)
+	c.stream.Close()
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/errors.go b/vendor/go.lsp.dev/jsonrpc2/errors.go
new file mode 100644
index 00000000000..9c15d42865c
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/errors.go
@@ -0,0 +1,70 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Error represents a JSON-RPC error.
+type Error struct {
+	// Code is a number indicating the error type that occurred.
+	Code Code `json:"code"`
+
+	// Message is a string providing a short description of the error.
+	Message string `json:"message"`
+
+	// Data is a primitive or structured value that contains additional
+	// information about the error. Can be omitted.
+	Data *json.RawMessage `json:"data,omitempty"`
+}
+
+// compile time check whether the Error implements the error interface.
+var _ error = (*Error)(nil)
+
+// Error implements error.Error.
+func (e *Error) Error() string {
+	if e == nil {
+		return ""
+	}
+	return e.Message
+}
+
+// Unwrap implements errors.Unwrap.
+//
+// It returns an error built from the receiver's message.
+func (e *Error) Unwrap() error { return errors.New(e.Message) }
+
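A hedged sketch of how callers can match these wire errors (illustrative, not part of the patch; assumes the client sketch shown earlier and the usual "errors" import):

// classify maps a failed Call to a human-readable cause by recovering
// the *Error that Call surfaces for JSON-RPC error responses.
func classify(err error) string {
	var rpcErr *jsonrpc2.Error
	if !errors.As(err, &rpcErr) {
		return "transport or local error: " + err.Error()
	}
	switch rpcErr.Code {
	case jsonrpc2.MethodNotFound:
		return "server does not implement the method"
	case jsonrpc2.InvalidParams:
		return "server rejected the parameters"
	default:
		return rpcErr.Message
	}
}

+// NewError builds an Error struct for the supplied code and message.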
+func NewError(c Code, message string) *Error {
+	return &Error{
+		Code:    c,
+		Message: message,
+	}
+}
+
+// Errorf builds an Error struct for the supplied code, format and args.
+func Errorf(c Code, format string, args ...interface{}) *Error {
+	return &Error{
+		Code:    c,
+		Message: fmt.Sprintf(format, args...),
+	}
+}
+
+// constErr represents an error constant.
+type constErr string
+
+// compile time check whether the constErr implements the error interface.
+var _ error = (*constErr)(nil)
+
+// Error implements error.Error.
+func (e constErr) Error() string { return string(e) }
+
+const (
+	// ErrIdleTimeout is returned when serving timed out waiting for new connections.
+	ErrIdleTimeout = constErr("timed out waiting for new connections")
+)
diff --git a/vendor/go.lsp.dev/jsonrpc2/handler.go b/vendor/go.lsp.dev/jsonrpc2/handler.go
new file mode 100644
index 00000000000..3c4e32251de
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/handler.go
@@ -0,0 +1,120 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"context"
+	"fmt"
+	"sync"
+)
+
+// Handler is invoked to handle incoming requests.
+//
+// The Replier sends a reply to the request and must be called exactly once.
+type Handler func(ctx context.Context, reply Replier, req Request) error
+
+// Replier is passed to handlers to allow them to reply to the request.
+//
+// If err is set then result will be ignored.
+type Replier func(ctx context.Context, result interface{}, err error) error
+
+// MethodNotFoundHandler is a Handler that replies to all call requests with the
+// standard method not found response.
+//
+// This should normally be the final handler in a chain.
+func MethodNotFoundHandler(ctx context.Context, reply Replier, req Request) error {
+	return reply(ctx, nil, fmt.Errorf("%q: %w", req.Method(), ErrMethodNotFound))
+}
+
+// ReplyHandler creates a Handler that panics if the wrapped handler does
+// not call the Replier exactly once for every request that it is passed.
+func ReplyHandler(handler Handler) (h Handler) {
+	h = Handler(func(ctx context.Context, reply Replier, req Request) error {
+		called := false
+		err := handler(ctx, func(ctx context.Context, result interface{}, err error) error {
+			if called {
+				panic(fmt.Errorf("request %q replied to more than once", req.Method()))
+			}
+			called = true
+
+			return reply(ctx, result, err)
+		}, req)
+		if !called {
+			panic(fmt.Errorf("request %q was never replied to", req.Method()))
+		}
+		return err
+	})
+
+	return h
+}
+
+// CancelHandler returns a handler that supports cancellation, and a function
+// that can be used to trigger canceling of in-progress requests.
+func CancelHandler(handler Handler) (h Handler, canceller func(id ID)) {
+	var mu sync.Mutex
+	handling := make(map[ID]context.CancelFunc)
+
+	h = Handler(func(ctx context.Context, reply Replier, req Request) error {
+		if call, ok := req.(*Call); ok {
+			cancelCtx, cancel := context.WithCancel(ctx)
+			ctx = cancelCtx
+
+			mu.Lock()
+			handling[call.ID()] = cancel
+			mu.Unlock()
+
+			innerReply := reply
+			reply = func(ctx context.Context, result interface{}, err error) error {
+				mu.Lock()
+				delete(handling, call.ID())
+				mu.Unlock()
+				return innerReply(ctx, result, err)
+			}
+		}
+		return handler(ctx, reply, req)
+	})
+
+	canceller = func(id ID) {
+		mu.Lock()
+		cancel, found := handling[id]
+		mu.Unlock()
+		if found {
+			cancel()
+		}
+	}
+
+	return h, canceller
+}
+
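A short, hedged sketch of composing these decorators on a serving connection (illustrative fragment; `handleMsg`, the request id, and `conn`/`ctx` from the earlier client sketch are hypothetical):

// CancelHandler wraps the chain outermost so it records every Call's ID;
// AsyncHandler keeps a slow request from blocking the stream's read loop;
// ReplyHandler enforces the exactly-once reply contract.
handler, canceller := jsonrpc2.CancelHandler(
	jsonrpc2.AsyncHandler(jsonrpc2.ReplyHandler(handleMsg)),
)
conn.Go(ctx, handler)

// A "$/cancelRequest" notification can then be wired to:
canceller(jsonrpc2.NewNumberID(7)) // hypothetical request id

+// AsyncHandler returns a handler that processes each request in its own goroutine.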
+//
+// The handler returns immediately, without the request being processed.
+// Each request then waits for the previous request to finish before it starts.
+//
+// This allows the stream to unblock at the cost of unbounded goroutines
+// all stalled on the previous one.
+func AsyncHandler(handler Handler) (h Handler) {
+	nextRequest := make(chan struct{})
+	close(nextRequest)
+
+	h = Handler(func(ctx context.Context, reply Replier, req Request) error {
+		waitForPrevious := nextRequest
+		nextRequest = make(chan struct{})
+		unlockNext := nextRequest
+		innerReply := reply
+		reply = func(ctx context.Context, result interface{}, err error) error {
+			close(unlockNext)
+			return innerReply(ctx, result, err)
+		}
+
+		go func() {
+			<-waitForPrevious
+			_ = handler(ctx, reply, req)
+		}()
+		return nil
+	})
+
+	return h
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go b/vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go
new file mode 100644
index 00000000000..dafd0667251
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go
@@ -0,0 +1,7 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go.
+//
+// https://www.jsonrpc.org/specification
+package jsonrpc2 // import "go.lsp.dev/jsonrpc2"
diff --git a/vendor/go.lsp.dev/jsonrpc2/message.go b/vendor/go.lsp.dev/jsonrpc2/message.go
new file mode 100644
index 00000000000..0f3fd54c600
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/message.go
@@ -0,0 +1,358 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Message is the interface to all JSON-RPC message types.
+//
+// They share no common functionality, but are a closed set of concrete types
+// that are allowed to implement this interface.
+//
+// The message types are *Call, *Response and *Notification.
+type Message interface {
+	// jsonrpc2Message is used to make the set of message implementations a
+	// closed set.
+	jsonrpc2Message()
+}
+
+// Request is the shared interface to jsonrpc2 messages that request
+// a method be invoked.
+//
+// The request types are a closed set of *Call and *Notification.
+type Request interface {
+	Message
+
+	// Method is a string containing the method name to invoke.
+	Method() string
+	// Params is either a struct or an array with the parameters of the method.
+	Params() json.RawMessage
+
+	// jsonrpc2Request is used to make the set of request implementations closed.
+	jsonrpc2Request()
+}
+
+// Call is a request that expects a response.
+//
+// The response will have a matching ID.
+type Call struct {
+	// Method is a string containing the method name to invoke.
+	method string
+	// Params is either a struct or an array with the parameters of the method.
+	params json.RawMessage
+	// id of this request, used to tie the Response back to the request.
+	id ID
+}
+
+// make sure a Call implements the Request, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ Request          = (*Call)(nil)
+	_ json.Marshaler   = (*Call)(nil)
+	_ json.Unmarshaler = (*Call)(nil)
+)
+
+// NewCall constructs a new Call message for the supplied ID, method and
+// parameters.
+func NewCall(id ID, method string, params interface{}) (*Call, error) {
+	p, merr := marshalInterface(params)
+	req := &Call{
+		id:     id,
+		method: method,
+		params: p,
+	}
+	return req, merr
+}
+
+// ID returns the current call id.
+func (c *Call) ID() ID { return c.id }
+
+// Method implements Request.
+func (c *Call) Method() string { return c.method }
+
+// Params implements Request.
+func (c *Call) Params() json.RawMessage { return c.params }
+
+// jsonrpc2Message implements Request.
+func (Call) jsonrpc2Message() {}
+
+// jsonrpc2Request implements Request.
+func (Call) jsonrpc2Request() {}
+
+// MarshalJSON implements json.Marshaler.
+func (c Call) MarshalJSON() ([]byte, error) {
+	req := wireRequest{
+		Method: c.method,
+		Params: &c.params,
+		ID:     &c.id,
+	}
+	data, err := json.Marshal(req)
+	if err != nil {
+		return data, fmt.Errorf("marshaling call: %w", err)
+	}
+
+	return data, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (c *Call) UnmarshalJSON(data []byte) error {
+	var req wireRequest
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&req); err != nil {
+		return fmt.Errorf("unmarshaling call: %w", err)
+	}
+
+	c.method = req.Method
+	if req.Params != nil {
+		c.params = *req.Params
+	}
+	if req.ID != nil {
+		c.id = *req.ID
+	}
+
+	return nil
+}
+
+// Response is a reply to a Request.
+//
+// It will have the same ID as the call it is a response to.
+type Response struct {
+	// result is the content of the response.
+	result json.RawMessage
+	// err is set only if the call failed.
+	err error
+	// ID of the request this is a response to.
+	id ID
+}
+
+// make sure a Response implements the Message, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ Message          = (*Response)(nil)
+	_ json.Marshaler   = (*Response)(nil)
+	_ json.Unmarshaler = (*Response)(nil)
+)
+
+// NewResponse constructs a new Response message that is a reply to the
+// supplied request ID. If err is set, result may be ignored.
+func NewResponse(id ID, result interface{}, err error) (*Response, error) {
+	r, merr := marshalInterface(result)
+	resp := &Response{
+		id:     id,
+		result: r,
+		err:    err,
+	}
+	return resp, merr
+}
+
+// ID returns the current response id.
+func (r *Response) ID() ID { return r.id }
+
+// Result returns the Response result.
+func (r *Response) Result() json.RawMessage { return r.result }
+
+// Err returns the Response error.
+func (r *Response) Err() error { return r.err }
+
+// jsonrpc2Message implements Message.
+func (r *Response) jsonrpc2Message() {}
+
+// MarshalJSON implements json.Marshaler.
+func (r Response) MarshalJSON() ([]byte, error) {
+	resp := &wireResponse{
+		Error: toError(r.err),
+		ID:    &r.id,
+	}
+	if resp.Error == nil {
+		resp.Result = &r.result
+	}
+
+	data, err := json.Marshal(resp)
+	if err != nil {
+		return data, fmt.Errorf("marshaling response: %w", err)
+	}
+
+	return data, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (r *Response) UnmarshalJSON(data []byte) error {
+	var resp wireResponse
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&resp); err != nil {
+		return fmt.Errorf("unmarshaling jsonrpc response: %w", err)
+	}
+
+	if resp.Result != nil {
+		r.result = *resp.Result
+	}
+	if resp.Error != nil {
+		r.err = resp.Error
+	}
+	if resp.ID != nil {
+		r.id = *resp.ID
+	}
+
+	return nil
+}
+
+func toError(err error) *Error {
+	if err == nil {
+		// no error, the response is complete
+		return nil
+	}
+
+	if wrapped, ok := err.(*Error); ok {
+		// already a wire error, just use it
+		return wrapped
+	}
+
+	result := &Error{Message: err.Error()}
+	var wrapped *Error
+	if errors.As(err, &wrapped) {
+		// if we wrapped a wire error, keep the code from the wrapped error
+		// but the message from the outer error
+		result.Code = wrapped.Code
+	}
+
+	return result
+}
+
+// Notification is a request for which a response cannot occur, and as such
+// it has no ID.
+type Notification struct {
+	// Method is a string containing the method name to invoke.
+	method string
+
+	params json.RawMessage
+}
+
+// make sure a Notification implements the Request, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ Request          = (*Notification)(nil)
+	_ json.Marshaler   = (*Notification)(nil)
+	_ json.Unmarshaler = (*Notification)(nil)
+)
+
+// NewNotification constructs a new Notification message for the supplied
+// method and parameters.
+func NewNotification(method string, params interface{}) (*Notification, error) {
+	p, merr := marshalInterface(params)
+	notify := &Notification{
+		method: method,
+		params: p,
+	}
+	return notify, merr
+}
+
+// Method implements Request.
+func (n *Notification) Method() string { return n.method }
+
+// Params implements Request.
+func (n *Notification) Params() json.RawMessage { return n.params }
+
+// jsonrpc2Message implements Request.
+func (Notification) jsonrpc2Message() {}
+
+// jsonrpc2Request implements Request.
+func (Notification) jsonrpc2Request() {}
+
+// MarshalJSON implements json.Marshaler.
+func (n Notification) MarshalJSON() ([]byte, error) {
+	req := wireRequest{
+		Method: n.method,
+		Params: &n.params,
+	}
+	data, err := json.Marshal(req)
+	if err != nil {
+		return data, fmt.Errorf("marshaling notification: %w", err)
+	}
+
+	return data, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (n *Notification) UnmarshalJSON(data []byte) error {
+	var req wireRequest
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&req); err != nil {
+		return fmt.Errorf("unmarshaling notification: %w", err)
+	}
+
+	n.method = req.Method
+	if req.Params != nil {
+		n.params = *req.Params
+	}
+
+	return nil
+}
+
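To make the three message types concrete, a small editorial sketch (not part of the patch; payload values are hypothetical) of how raw bytes map onto them:

package main

import (
	"fmt"
	"log"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	for _, raw := range []string{
		`{"jsonrpc":"2.0","id":1,"method":"initialize","params":{}}`, // *Call
		`{"jsonrpc":"2.0","method":"initialized","params":{}}`,       // *Notification
		`{"jsonrpc":"2.0","id":1,"result":{}}`,                       // *Response
	} {
		msg, err := jsonrpc2.DecodeMessage([]byte(raw))
		if err != nil {
			log.Fatal(err)
		}
		switch m := msg.(type) {
		case *jsonrpc2.Call:
			fmt.Println("call:", m.Method())
		case *jsonrpc2.Notification:
			fmt.Println("notification:", m.Method())
		case *jsonrpc2.Response:
			fmt.Println("response to id:", m.ID())
		}
	}
}

+// DecodeMessage decodes data to a Message.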
+func DecodeMessage(data []byte) (Message, error) {
+	var msg combined
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&msg); err != nil {
+		return nil, fmt.Errorf("unmarshaling jsonrpc message: %w", err)
+	}
+
+	if msg.Method == "" {
+		// no method, should be a response
+		if msg.ID == nil {
+			return nil, ErrInvalidRequest
+		}
+
+		resp := &Response{
+			id: *msg.ID,
+		}
+		if msg.Error != nil {
+			resp.err = msg.Error
+		}
+		if msg.Result != nil {
+			resp.result = *msg.Result
+		}
+
+		return resp, nil
+	}
+
+	// has a method, must be a request
+	if msg.ID == nil {
+		// request with no ID is a notify
+		notify := &Notification{
+			method: msg.Method,
+		}
+		if msg.Params != nil {
+			notify.params = *msg.Params
+		}
+
+		return notify, nil
+	}
+
+	// request with an ID, must be a call
+	call := &Call{
+		method: msg.Method,
+		id:     *msg.ID,
+	}
+	if msg.Params != nil {
+		call.params = *msg.Params
+	}
+
+	return call, nil
+}
+
+// marshalInterface marshals obj to a json.RawMessage.
+func marshalInterface(obj interface{}) (json.RawMessage, error) {
+	data, err := json.Marshal(obj)
+	if err != nil {
+		return json.RawMessage{}, fmt.Errorf("failed to marshal json: %w", err)
+	}
+	return json.RawMessage(data), nil
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/serve.go b/vendor/go.lsp.dev/jsonrpc2/serve.go
new file mode 100644
index 00000000000..3fc28dd6574
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/serve.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"os"
+	"time"
+)
+
+// NOTE: This file provides an experimental API for serving multiple remote
+// jsonrpc2 clients over the network. For now, it is intentionally similar to
+// net/http, but that may change in the future as we figure out the correct
+// semantics.
+
+// StreamServer is used to serve incoming jsonrpc2 clients communicating over
+// a newly created connection.
+type StreamServer interface {
+	ServeStream(context.Context, Conn) error
+}
+
+// ServerFunc is an adapter that implements the StreamServer interface
+// using an ordinary function.
+type ServerFunc func(context.Context, Conn) error
+
+// ServeStream implements StreamServer.
+//
+// ServeStream calls f(ctx, s).
+func (f ServerFunc) ServeStream(ctx context.Context, c Conn) error {
+	return f(ctx, c)
+}
+
+// HandlerServer returns a StreamServer that handles incoming streams using the
+// provided handler.
+func HandlerServer(h Handler) StreamServer {
+	return ServerFunc(func(ctx context.Context, conn Conn) error {
+		conn.Go(ctx, h)
+		<-conn.Done()
+		return conn.Err()
+	})
+}
+
+// ListenAndServe starts a jsonrpc2 server on the given address.
+//
+// If idleTimeout is non-zero, ListenAndServe exits after there are no clients for
+// this duration, otherwise it exits only on error.
+func ListenAndServe(ctx context.Context, network, addr string, server StreamServer, idleTimeout time.Duration) error {
+	ln, err := net.Listen(network, addr)
+	if err != nil {
+		return fmt.Errorf("failed to listen %s:%s: %w", network, addr, err)
+	}
+	defer ln.Close()
+
+	if network == "unix" {
+		defer os.Remove(addr)
+	}
+
+	return Serve(ctx, ln, server, idleTimeout)
+}
+
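A brief serving sketch tying the pieces above together (illustrative only; assumes the usual "context", "time" and jsonrpc2 imports, and a hypothetical address):

// run serves a handler on TCP port 4389, shutting down after
// five idle minutes with ErrIdleTimeout.
func run(ctx context.Context) error {
	server := jsonrpc2.HandlerServer(jsonrpc2.MethodNotFoundHandler)
	return jsonrpc2.ListenAndServe(ctx, "tcp", "localhost:4389", server, 5*time.Minute)
}

+// Serve accepts incoming connections from the network, and handles them using
+// the provided server. If idleTimeout is non-zero, Serve exits after there
+// are no clients for this duration, otherwise it exits only on error.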
+func Serve(ctx context.Context, ln net.Listener, server StreamServer, idleTimeout time.Duration) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Max duration: ~290 years; surely that's long enough.
+	const forever = 1<<63 - 1
+	if idleTimeout <= 0 {
+		idleTimeout = forever
+	}
+	connTimer := time.NewTimer(idleTimeout)
+
+	newConns := make(chan net.Conn)
+	doneListening := make(chan error)
+	closedConns := make(chan error)
+
+	go func() {
+		for {
+			nc, err := ln.Accept()
+			if err != nil {
+				select {
+				case doneListening <- fmt.Errorf("accept: %w", err):
+				case <-ctx.Done():
+				}
+				return
+			}
+
+			newConns <- nc
+		}
+	}()
+
+	activeConns := 0
+	for {
+		select {
+		case netConn := <-newConns:
+			activeConns++
+			connTimer.Stop()
+			stream := NewStream(netConn)
+			go func() {
+				conn := NewConn(stream)
+				closedConns <- server.ServeStream(ctx, conn)
+				stream.Close()
+			}()
+
+		case err := <-doneListening:
+			return err
+
+		case <-closedConns:
+			activeConns--
+			if activeConns == 0 {
+				connTimer.Reset(idleTimeout)
+			}
+
+		case <-connTimer.C:
+			return ErrIdleTimeout
+
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/stream.go b/vendor/go.lsp.dev/jsonrpc2/stream.go
new file mode 100644
index 00000000000..b05ab6bbfc4
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/stream.go
@@ -0,0 +1,226 @@
+// SPDX-FileCopyrightText: 2018 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"bufio"
+	"context"
+	stdjson "encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/segmentio/encoding/json"
+)
+
+const (
+	// HdrContentLength is the HTTP header name of the length of the content part in bytes. This header is required.
+	// This entity header indicates the size of the entity-body, in bytes, sent to the recipient.
+	//
+	// RFC 7230, section 3.3.2: Content-Length:
+	// https://tools.ietf.org/html/rfc7230#section-3.3.2
+	HdrContentLength = "Content-Length"
+
+	// HdrContentType is the mime type of the content part. Defaults to "application/vscode-jsonrpc; charset=utf-8".
+	// This entity header is used to indicate the media type of the resource.
+	//
+	// RFC 7231, section 3.1.1.5: Content-Type:
+	// https://tools.ietf.org/html/rfc7231#section-3.1.1.5
+	HdrContentType = "Content-Type"
+
+	// HdrContentSeparator is the header and content part separator.
+	HdrContentSeparator = "\r\n\r\n"
+)
+
+// Framer wraps a network connection up into a Stream.
+//
+// It is responsible for the framing and encoding of messages into wire form.
+// NewRawStream and NewStream are implementations of a Framer.
+type Framer func(conn io.ReadWriteCloser) Stream
+
+// Stream abstracts the transport mechanics from the JSON RPC protocol.
+//
+// A Conn reads and writes messages using the stream it was provided on
+// construction, and assumes that each call to Read or Write fully transfers
+// a single message, or returns an error.
+//
+// A stream is not safe for concurrent use, it is expected it will be used by
+// a single Conn in a safe manner.
+type Stream interface {
+	// Read gets the next message from the stream.
+	Read(context.Context) (Message, int64, error)
+
+	// Write sends a message to the stream.
+	Write(context.Context, Message) (int64, error)
+
+	// Close closes the connection.
+	// Any blocked Read or Write operations will be unblocked and return errors.
+ Close() error +} + +type rawStream struct { + conn io.ReadWriteCloser + in *stdjson.Decoder +} + +// NewRawStream returns a Stream built on top of a io.ReadWriteCloser. +// +// The messages are sent with no wrapping, and rely on json decode consistency +// to determine message boundaries. +func NewRawStream(conn io.ReadWriteCloser) Stream { + return &rawStream{ + conn: conn, + in: stdjson.NewDecoder(conn), // TODO(zchee): why test fail using segmentio json.Decoder? + } +} + +// Read implements Stream.Read. +func (s *rawStream) Read(ctx context.Context) (Message, int64, error) { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + default: + } + + var raw stdjson.RawMessage + if err := s.in.Decode(&raw); err != nil { + return nil, 0, fmt.Errorf("decoding raw message: %w", err) + } + + msg, err := DecodeMessage(raw) + return msg, int64(len(raw)), err +} + +// Write implements Stream.Write. +func (s *rawStream) Write(ctx context.Context, msg Message) (int64, error) { + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + } + + data, err := json.Marshal(msg) + if err != nil { + return 0, fmt.Errorf("marshaling message: %w", err) + } + + n, err := s.conn.Write(data) + if err != nil { + return 0, fmt.Errorf("write to stream: %w", err) + } + + return int64(n), nil +} + +// Close implements Stream.Close. +func (s *rawStream) Close() error { + return s.conn.Close() +} + +type stream struct { + conn io.ReadWriteCloser + in *bufio.Reader +} + +// NewStream returns a Stream built on top of a io.ReadWriteCloser. +// +// The messages are sent with HTTP content length and MIME type headers. +// This is the format used by LSP and others. +func NewStream(conn io.ReadWriteCloser) Stream { + return &stream{ + conn: conn, + in: bufio.NewReader(conn), + } +} + +// Read implements Stream.Read. +func (s *stream) Read(ctx context.Context) (Message, int64, error) { + select { + case <-ctx.Done(): + return nil, 0, ctx.Err() + default: + } + + var total int64 + var length int64 + // read the header, stop on the first empty line + for { + line, err := s.in.ReadString('\n') + total += int64(len(line)) + if err != nil { + return nil, total, fmt.Errorf("failed reading header line: %w", err) + } + + line = strings.TrimSpace(line) + // check we have a header line + if line == "" { + break + } + + colon := strings.IndexRune(line, ':') + if colon < 0 { + return nil, total, fmt.Errorf("invalid header line %q", line) + } + + name, value := line[:colon], strings.TrimSpace(line[colon+1:]) + switch name { + case HdrContentLength: + if length, err = strconv.ParseInt(value, 10, 32); err != nil { + return nil, total, fmt.Errorf("failed parsing %s: %v: %w", HdrContentLength, value, err) + } + if length <= 0 { + return nil, total, fmt.Errorf("invalid %s: %v", HdrContentLength, length) + } + default: + // ignoring unknown headers + } + } + + if length == 0 { + return nil, total, fmt.Errorf("missing %s header", HdrContentLength) + } + + data := make([]byte, length) + if _, err := io.ReadFull(s.in, data); err != nil { + return nil, total, fmt.Errorf("read full of data: %w", err) + } + + total += length + msg, err := DecodeMessage(data) + return msg, total, err +} + +// Write implements Stream.Write. 
+func (s *stream) Write(ctx context.Context, msg Message) (int64, error) {
+	select {
+	case <-ctx.Done():
+		return 0, ctx.Err()
+	default:
+	}
+
+	data, err := json.Marshal(msg)
+	if err != nil {
+		return 0, fmt.Errorf("marshaling message: %w", err)
+	}
+
+	n, err := fmt.Fprintf(s.conn, "%s: %v%s", HdrContentLength, len(data), HdrContentSeparator)
+	total := int64(n)
+	if err != nil {
+		return 0, fmt.Errorf("write data to conn: %w", err)
+	}
+
+	n, err = s.conn.Write(data)
+	total += int64(n)
+	if err != nil {
+		return 0, fmt.Errorf("write data to conn: %w", err)
+	}
+
+	return total, nil
+}
+
+// Close implements Stream.Close.
+func (s *stream) Close() error {
+	return s.conn.Close()
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/wire.go b/vendor/go.lsp.dev/jsonrpc2/wire.go
new file mode 100644
index 00000000000..1fb3f7f2c14
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/wire.go
@@ -0,0 +1,140 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Version represents a JSON-RPC version.
+const Version = "2.0"
+
+// version is a special 0 sized struct that encodes as the jsonrpc version tag.
+//
+// It will fail during decode if it is not the correct version tag in the stream.
+type version struct{}
+
+// compile time check whether the version implements the json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ json.Marshaler   = (*version)(nil)
+	_ json.Unmarshaler = (*version)(nil)
+)
+
+// MarshalJSON implements json.Marshaler.
+func (version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(Version)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (version) UnmarshalJSON(data []byte) error {
+	version := ""
+	if err := json.Unmarshal(data, &version); err != nil {
+		return fmt.Errorf("failed to Unmarshal: %w", err)
+	}
+	if version != Version {
+		return fmt.Errorf("invalid RPC version %v", version)
+	}
+	return nil
+}
+
+// ID is a Request identifier.
+//
+// Only one of either the name or number members will be set, using the
+// number form if the name is the empty string.
+type ID struct {
+	name   string
+	number int32
+}
+
+// compile time check whether the ID implements the fmt.Formatter, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ fmt.Formatter    = (*ID)(nil)
+	_ json.Marshaler   = (*ID)(nil)
+	_ json.Unmarshaler = (*ID)(nil)
+)
+
+// NewNumberID returns a new number request ID.
+func NewNumberID(v int32) ID { return ID{number: v} }
+
+// NewStringID returns a new string request ID.
+func NewStringID(v string) ID { return ID{name: v} }
+
+// Format writes the ID to the formatter.
+//
+// If the rune is q the representation is non-ambiguous:
+// string forms are quoted, number forms are preceded by a #.
+func (id ID) Format(f fmt.State, r rune) {
+	numF, strF := `%d`, `%s`
+	if r == 'q' {
+		numF, strF = `#%d`, `%q`
+	}
+
+	switch {
+	case id.name != "":
+		fmt.Fprintf(f, strF, id.name)
+	default:
+		fmt.Fprintf(f, numF, id.number)
+	}
+}
+
+// MarshalJSON implements json.Marshaler.
+func (id *ID) MarshalJSON() ([]byte, error) {
+	if id.name != "" {
+		return json.Marshal(id.name)
+	}
+	return json.Marshal(id.number)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (id *ID) UnmarshalJSON(data []byte) error {
+	*id = ID{}
+	if err := json.Unmarshal(data, &id.number); err == nil {
+		return nil
+	}
+	return json.Unmarshal(data, &id.name)
+}
+
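To make the framing above concrete, a small editorial sketch (not part of the patch; values are hypothetical) that writes one Call through NewStream and prints the raw bytes:

package main

import (
	"bytes"
	"context"
	"fmt"
	"log"

	"go.lsp.dev/jsonrpc2"
)

// rwc adapts a bytes.Buffer into the io.ReadWriteCloser a Stream expects.
type rwc struct{ bytes.Buffer }

func (*rwc) Close() error { return nil }

func main() {
	buf := &rwc{}
	stream := jsonrpc2.NewStream(buf)

	call, err := jsonrpc2.NewCall(jsonrpc2.NewNumberID(1), "initialize", map[string]int{"processId": 42})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := stream.Write(context.Background(), call); err != nil {
		log.Fatal(err)
	}

	// Prints the Content-Length header, the \r\n\r\n separator, then:
	//   {"jsonrpc":"2.0","method":"initialize","params":{"processId":42},"id":1}
	fmt.Printf("%q\n", buf.String())
}

+// wireRequest is sent to a server to represent a Call or Notify operation.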
+type wireRequest struct { + // VersionTag is always encoded as the string "2.0" + VersionTag version `json:"jsonrpc"` + // Method is a string containing the method name to invoke. + Method string `json:"method"` + // Params is either a struct or an array with the parameters of the method. + Params *json.RawMessage `json:"params,omitempty"` + // The id of this request, used to tie the Response back to the request. + // Will be either a string or a number. If not set, the Request is a notify, + // and no response is possible. + ID *ID `json:"id,omitempty"` +} + +// wireResponse is a reply to a Request. +// +// It will always have the ID field set to tie it back to a request, and will +// have either the Result or Error fields set depending on whether it is a +// success or failure wireResponse. +type wireResponse struct { + // VersionTag is always encoded as the string "2.0" + VersionTag version `json:"jsonrpc"` + // Result is the response value, and is required on success. + Result *json.RawMessage `json:"result,omitempty"` + // Error is a structured error response if the call fails. + Error *Error `json:"error,omitempty"` + // ID must be set and is the identifier of the Request this is a response to. + ID *ID `json:"id,omitempty"` +} + +// combined has all the fields of both Request and Response. +// +// We can decode this and then work out which it is. +type combined struct { + VersionTag version `json:"jsonrpc"` + ID *ID `json:"id,omitempty"` + Method string `json:"method"` + Params *json.RawMessage `json:"params,omitempty"` + Result *json.RawMessage `json:"result,omitempty"` + Error *Error `json:"error,omitempty"` +} diff --git a/vendor/go.lsp.dev/pkg/LICENSE b/vendor/go.lsp.dev/pkg/LICENSE new file mode 100644 index 00000000000..4551f8c8a69 --- /dev/null +++ b/vendor/go.lsp.dev/pkg/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2020, The Go Language Server Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/go.lsp.dev/pkg/xcontext/xcontext.go b/vendor/go.lsp.dev/pkg/xcontext/xcontext.go new file mode 100644 index 00000000000..df9017e0ca2 --- /dev/null +++ b/vendor/go.lsp.dev/pkg/xcontext/xcontext.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +// Package xcontext is a package to offer the extra functionality we need +// from contexts that is not available from the standard context package. +package xcontext + +import ( + "context" + "time" +) + +// Detach returns a context that keeps all the values of its parent context +// but detaches from the cancellation and error handling. +func Detach(ctx context.Context) context.Context { return detachedContext{ctx} } + +type detachedContext struct{ parent context.Context } + +func (v detachedContext) Deadline() (time.Time, bool) { return time.Time{}, false } +func (v detachedContext) Done() <-chan struct{} { return nil } +func (v detachedContext) Err() error { return nil } +func (v detachedContext) Value(key interface{}) interface{} { return v.parent.Value(key) } diff --git a/vendor/go.lsp.dev/protocol/.codecov.yml b/vendor/go.lsp.dev/protocol/.codecov.yml new file mode 100644 index 00000000000..dd726bf10fc --- /dev/null +++ b/vendor/go.lsp.dev/protocol/.codecov.yml @@ -0,0 +1,45 @@ +codecov: + allow_coverage_offsets: true + +parsers: + go: + partials_as_hits: true + +coverage: + precision: 1 + round: down + range: "70...100" + + status: + default_rules: + flag_coverage_not_uploaded_behavior: include + + project: + default: + target: auto + threshold: 1% + if_not_found: success + if_ci_failed: error + + patch: + default: + only_pulls: true + target: 50% + threshold: 10% + + changes: + default: + target: auto + threshold: 10% + if_not_found: success + if_ci_failed: error + branches: + - main + +comment: + behavior: default + require_changes: true + show_carryforward_flags: true + +github_checks: + annotations: true diff --git a/vendor/go.lsp.dev/protocol/.gitattributes b/vendor/go.lsp.dev/protocol/.gitattributes new file mode 100644 index 00000000000..a7b063e2172 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/.gitattributes @@ -0,0 +1,11 @@ +# go.lsp.dev/protocol project gitattributes file +# https://github.com/github/linguist#using-gitattributes +# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml + +# To prevent CRLF breakages on Windows for fragile files, like testdata. 
+* -text + +docs/ linguist-documentation +*.pb.go linguist-generated +*_gen.go linguist-generated +*_string.go linguist-generated diff --git a/vendor/go.lsp.dev/protocol/.gitignore b/vendor/go.lsp.dev/protocol/.gitignore new file mode 100644 index 00000000000..54e59b7493a --- /dev/null +++ b/vendor/go.lsp.dev/protocol/.gitignore @@ -0,0 +1,52 @@ +# go.lsp.dev/protocol project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +# please do not open a pull request to add something created by your editor or tools + +# github/gitignore/Go.gitignore +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +# cgo generated +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +# test generated +_testmain.go + +# profile +*.pprof + +# coverage +coverage.* + +# tools +tools/bin/ diff --git a/vendor/go.lsp.dev/protocol/.golangci.yml b/vendor/go.lsp.dev/protocol/.golangci.yml new file mode 100644 index 00000000000..8667ed4cdc0 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/.golangci.yml @@ -0,0 +1,242 @@ +# https://golangci-lint.run/usage/configuration/ +# https://github.com/golangci/golangci-lint/blob/master/pkg/config/linters_settings.go +--- +run: + timeout: 1m + issues-exit-code: 1 + tests: true + skip-dirs: [] + skip-dirs-use-default: true + skip-files: [] + allow-parallel-runners: true + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true + uniq-by-line: true + sort-results: true + +linters-settings: + depguard: + list-type: blacklist + include-go-root: false + dupl: + threshold: 150 + errcheck: + check-type-assertions: true + check-blank: true + # exclude: .errcheckignore + errorlint: + errorf: true + asserts: true + comparison: true + funlen: + lines: 100 + statements: 60 + gocognit: + min-complexity: 30 + goconst: + min-len: 3 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - redundantSprint + - whyNoLint + settings: + hugeParam: + sizeThreshold: 80 + rangeExprCopy: + sizeThreshold: 512 + rangeValCopy: + sizeThreshold: 128 + gocyclo: + min-complexity: 30 + godot: + scope: declarations + capital: false + gofmt: + simplify: true + gofumpt: + extra-rules: true + goheader: + values: + const: + AUTHOR: Go Language Server + regexp: + YEAR: '20\d\d' + template: |- + SPDX-FileCopyrightText: {{ YEAR }} The {{ AUTHOR }} Authors + SPDX-License-Identifier: BSD-3-Clause + goimports: + local-prefixes: go.lsp.dev/protocol + gosimple: + go: 1.16 + govet: + enable-all: true + check-shadowing: true + disable: + - fieldalignment + importas: + alias: [] + no-unaliased: true + lll: + line-length: 120 + tab-width: 1 + misspell: + locale: US + ignore-words: + - cancelled + - cancelling + nakedret: + max-func-lines: 30 + nestif: + min-complexity: 4 + prealloc: + simple: true + range-loops: true + for-loops: true + staticcheck: + go: 1.16 + testpackage: + skip-regexp: '.*(export)_test\.go' + unparam: + check-exported: true + algo: 
cha + unused: + go: 1.16 + whitespace: + multi-if: true + multi-func: true + +linters: + fast: false + disabled: + - exhaustivestruct # Checks if all struct's fields are initialized + - forbidigo # Forbids identifiers + - forcetypeassert # finds forced type assertions + - gci # Gci control golang package import order and make it always deterministic. + - gochecknoglobals # check that no global variables exist + - gochecknoinits # Checks that no init functions are present in Go code + - goconst # Finds repeated strings that could be replaced by a constant + - godox # Tool for detection of FIXME, TODO and other comment keywords + - goerr113 # Golang linter to check the errors handling expressions + - golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes + - gomnd # An analyzer to detect magic numbers. + - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + - gomodguard # Allow and block list linter for direct Go module dependencies. + - interfacer # Linter that suggests narrower interface types + - lll # Reports long lines + - maligned # Tool to detect Go structs that would take less memory if their fields were sorted + - promlinter # Check Prometheus metrics naming via promlint + - scopelint # Scopelint checks for unpinned variables in go programs + - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. + - testpackage # TODO(zchee): enable: # linter that makes you use a separate _test package + - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + - wrapcheck # TODO(zchee): enable: # Checks that errors returned from external packages are wrapped + - wsl # Whitespace Linter + enable: + - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + - bodyclose # checks whether HTTP response body is closed successfully + - cyclop # checks function and package cyclomatic complexity + - deadcode # Finds unused code + - depguard # Go linter that checks if package imports are in a list of acceptable packages + - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + - dupl # Tool for code clone detection + - durationcheck # check for two durations multiplied together + - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - exhaustive # check exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + - funlen # Tool for detection of long functions + - gocognit # Computes and checks the cognitive complexity of functions + - gocritic # Provides many diagnostics that check for bugs, performance and style issues. + - gocyclo # Computes and checks the cyclomatic complexity of functions + - godot # Check if comments end in a period + - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification + - gofumpt # Gofumpt checks whether code was gofumpt-ed. + - goheader # Checks is file header matches to pattern + - goimports # Goimports does everything that gofmt does. 
Additionally it checks unused imports + - goprintffuncname # Checks that printf-like functions are named with `f` at the end + - gosec # Inspects source code for security problems + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ifshort # Checks that your code uses short syntax for if-statements whenever possible + - importas # Enforces consistent import aliases + - ineffassign # Detects when assignments to existing variables are not used + - makezero # Finds slice declarations with non-zero initial length + - misspell # Finds commonly misspelled English words in comments + - nakedret # Finds naked returns in functions greater than a specified function length + - nestif # Reports deeply nested if statements + - nilerr # Finds the code that returns nil even if it checks that the error is not nil. + - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity + - noctx # noctx finds sending http request without context.Context + - nolintlint # Reports ill-formed or insufficient nolint directives + - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test + - prealloc # Finds slice declarations that could potentially be preallocated + - predeclared # find code that shadows one of Go's predeclared identifiers + - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. + - rowserrcheck # checks whether Err of rows is checked successfully + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - structcheck # Finds unused struct fields + - stylecheck # Stylecheck is a replacement for golint + - tagliatelle # Checks the struct tags. + - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unconvert # Remove unnecessary type conversions + - unparam # Reports unused function parameters + - unused # Checks Go code for unused constants, variables, functions and types + - varcheck # Finds unused global variables and constants + - wastedassign # wastedassign finds wasted assignment statements. 
+ - whitespace # Tool for detection of leading and trailing whitespace + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + exclude-use-default: true + exclude-rules: + - path: _test\.go + linters: + - cyclop + - dupl + - errcheck + - funlen + - gocognit + - goconst + - gocritic + - gocyclo + - gosec + - thelper + - wrapcheck + - path: "(.*)?_example_test.go" + linters: + - gocritic + # Exclude shadow checking on the variable named err + - text: "shadow: declaration of \"(err|ok)\"" + linters: + - govet + # false positive + - path: language.go + text: "deprecatedComment: the proper format is `Deprecated: `" + # async + - path: handler.go + text: "Error return value of `conn.Notify` is not checked" + linters: + - errcheck + - path: log.go + text: "Error return value of `s.log.Write` is not checked" + linters: + - errcheck + - path: deprecated.go + linters: + - lll + - path: "(client|server)_json.go" + linters: + - nlreturn diff --git a/vendor/go.lsp.dev/protocol/LICENSE b/vendor/go.lsp.dev/protocol/LICENSE new file mode 100644 index 00000000000..e8748709cfb --- /dev/null +++ b/vendor/go.lsp.dev/protocol/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, The Go Language Server Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/go.lsp.dev/protocol/Makefile b/vendor/go.lsp.dev/protocol/Makefile new file mode 100644 index 00000000000..f08992fc093 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/Makefile @@ -0,0 +1,126 @@ +# ----------------------------------------------------------------------------- +# global + +.DEFAULT_GOAL := test +comma := , +empty := +space := $(empty) $(empty) + +# ----------------------------------------------------------------------------- +# go + +GO_PATH ?= $(shell go env GOPATH) + +PKG := $(subst $(GO_PATH)/src/,,$(CURDIR)) +CGO_ENABLED ?= 0 +GO_BUILDTAGS=osusergo,netgo,static +GO_LDFLAGS=-s -w "-extldflags=-static" +GO_FLAGS ?= -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' -ldflags='${GO_LDFLAGS}' -installsuffix=netgo + +TOOLS_DIR := ${CURDIR}/tools +TOOLS_BIN := ${TOOLS_DIR}/bin +TOOLS := $(shell cd ${TOOLS_DIR} && go list -v -x -f '{{ join .Imports " " }}' -tags=tools) + +GO_PKGS := ./... + +GO_TEST ?= ${TOOLS_BIN}/gotestsum -- +GO_TEST_PKGS ?= $(shell go list -f='{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./...) +GO_TEST_FLAGS ?= -race -count=1 +GO_TEST_FUNC ?= . +GO_BENCH_FLAGS ?= -benchmem +GO_BENCH_FUNC ?= . +GO_LINT_FLAGS ?= + +# Set build environment +JOBS := $(shell getconf _NPROCESSORS_CONF) + +# ----------------------------------------------------------------------------- +# defines + +define target +@printf "+ $(patsubst ,$@,$(1))\\n" >&2 +endef + +# ----------------------------------------------------------------------------- +# target + +##@ test, bench, coverage + +export GOTESTSUM_FORMAT=standard-verbose + +.PHONY: test +test: CGO_ENABLED=1 +test: tools/bin/gotestsum ## Runs package test including race condition. + $(call target) + @CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -run=${GO_TEST_FUNC} -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' ${GO_TEST_PKGS} + +.PHONY: coverage +coverage: CGO_ENABLED=1 +coverage: tools/bin/gotestsum ## Takes packages test coverage. + $(call target) + CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -covermode=atomic -coverpkg=./... -coverprofile=coverage.out $(strip ${GO_FLAGS}) ${GO_PKGS} + + +##@ fmt, lint + +.PHONY: lint +lint: fmt lint/golangci-lint ## Run all linters. + +.PHONY: fmt +fmt: tools/goimportz tools/gofumpt ## Run goimportz and gofumpt. + $(call target) + find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/goimportz -local=${PKG},$(subst /protocol,,$(PKG)) -w + find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/gofumpt -extra -w + +.PHONY: lint/golangci-lint +lint/golangci-lint: tools/golangci-lint .golangci.yml ## Run golangci-lint. + $(call target) + ${TOOLS_BIN}/golangci-lint -j ${JOBS} run $(strip ${GO_LINT_FLAGS}) ./... + + +##@ tools + +.PHONY: tools +tools: tools/bin/'' ## Install tools + +tools/%: ## install an individual dependent tool + @${MAKE} tools/bin/$* 1>/dev/null + +tools/bin/%: ${TOOLS_DIR}/go.mod ${TOOLS_DIR}/go.sum + @cd tools; \ + for t in ${TOOLS}; do \ + if [ -z '$*' ] || [ $$(basename $$t) = '$*' ]; then \ + echo "Install $$t ..." >&2; \ + GOBIN=${TOOLS_BIN} CGO_ENABLED=0 go install -mod=mod ${GO_FLAGS} "$${t}"; \ + fi \ + done + + +##@ clean + +.PHONY: clean +clean: ## Cleanups binaries and extra files in the package. + $(call target) + @rm -rf *.out *.test *.prof trace.txt ${TOOLS_BIN} + + +##@ miscellaneous + +.PHONY: todo +TODO: ## Print the all of (TODO|BUG|XXX|FIXME|NOTE) in packages. + @grep -E '(TODO|BUG|XXX|FIXME)(\(.+\):|:)' $(shell find . 
-type f -name '*.go' -and -not -iwholename '*vendor*') + +.PHONY: nolint +nolint: ## Print the all of //nolint:... pragma in packages. + @grep -E -C 3 '//nolint.+' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*' -and -not -iwholename '*internal*') + +.PHONY: env/% +env/%: ## Print the value of MAKEFILE_VARIABLE. Use `make env/GO_FLAGS` or etc. + @echo $($*) + + +##@ help + +.PHONY: help +help: ## Show this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[33m\033[0m\n"} /^[a-zA-Z_0-9\/%_-]+:.*?##/ { printf " \033[1;32m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/vendor/go.lsp.dev/protocol/README.md b/vendor/go.lsp.dev/protocol/README.md new file mode 100644 index 00000000000..2f091def7ac --- /dev/null +++ b/vendor/go.lsp.dev/protocol/README.md @@ -0,0 +1,19 @@ +# protocol + +[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga] + +Package protocol implements Language Server Protocol specification in Go. + + + +[circleci]: https://app.circleci.com/pipelines/github/go-language-server/protocol +[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/protocol +[module]: https://github.com/go-language-server/protocol/releases/latest +[codecov]: https://codecov.io/gh/go-language-server/protocol +[ga]: https://github.com/go-language-server/protocol + +[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/protocol/main.svg?style=for-the-badge&label=CIRCLECI&logo=circleci +[pkg.go.dev-badge]: https://bit.ly/pkg-go-dev-badge +[module-badge]: https://img.shields.io/github/release/go-language-server/protocol.svg?color=007D9C&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0x
My4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4=
+[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/protocol/main?logo=codecov&style=for-the-badge
+[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/protocol?useReferer&pixel
diff --git a/vendor/go.lsp.dev/protocol/base.go b/vendor/go.lsp.dev/protocol/base.go
new file mode 100644
index 00000000000..3cca2e9f74a
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/base.go
@@ -0,0 +1,96 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// CancelParams is the params of a cancelRequest.
+type CancelParams struct {
+	// ID is the request id to cancel.
+	ID interface{} `json:"id"` // int32 | string
+}
+
+// ProgressParams is the params of a Progress notification.
+//
+// @since 3.15.0.
+type ProgressParams struct {
+	// Token is the progress token provided by the client or server.
+	Token ProgressToken `json:"token"`
+
+	// Value is the progress data.
+	Value interface{} `json:"value"`
+}
+
+// ProgressToken is the progress token provided by the client or server.
+//
+// @since 3.15.0.
+type ProgressToken struct {
+	name   string
+	number int32
+}
+
+// compile time check whether the ProgressToken implements the fmt.Formatter, fmt.Stringer, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ fmt.Formatter    = (*ProgressToken)(nil)
+	_ fmt.Stringer     = (*ProgressToken)(nil)
+	_ json.Marshaler   = (*ProgressToken)(nil)
+	_ json.Unmarshaler = (*ProgressToken)(nil)
+)
+
+// NewProgressToken returns a new ProgressToken.
+func NewProgressToken(s string) *ProgressToken {
+	return &ProgressToken{name: s}
+}
+
+// NewNumberProgressToken returns a new number ProgressToken.
+func NewNumberProgressToken(n int32) *ProgressToken {
+	return &ProgressToken{number: n}
+}
+
+// Format writes the ProgressToken to the formatter.
+//
+// If the rune is q the representation is non-ambiguous:
+// string forms are quoted.
+func (v ProgressToken) Format(f fmt.State, r rune) {
+	const numF = `%d`
+	strF := `%s`
+	if r == 'q' {
+		strF = `%q`
+	}
+
+	switch {
+	case v.name != "":
+		fmt.Fprintf(f, strF, v.name)
+	default:
+		fmt.Fprintf(f, numF, v.number)
+	}
+}
+
+// String returns a string representation of the ProgressToken.
+func (v ProgressToken) String() string {
+	return fmt.Sprint(v)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (v *ProgressToken) MarshalJSON() ([]byte, error) {
+	if v.name != "" {
+		return json.Marshal(v.name)
+	}
+
+	return json.Marshal(v.number)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *ProgressToken) UnmarshalJSON(data []byte) error {
+	*v = ProgressToken{}
+	if err := json.Unmarshal(data, &v.number); err == nil {
+		return nil
+	}
+
+	return json.Unmarshal(data, &v.name)
+}
diff --git a/vendor/go.lsp.dev/protocol/basic.go b/vendor/go.lsp.dev/protocol/basic.go
new file mode 100644
index 00000000000..feb022fea4c
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/basic.go
@@ -0,0 +1,705 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"go.lsp.dev/uri"
+)
+
+// DocumentURI represents the URI of a document.
+//
+// Many of the interfaces contain fields that correspond to the URI of a document.
+// For clarity, the type of such a field is declared as a DocumentURI.
+// Over the wire, it will still be transferred as a string, but this guarantees +// that the contents of that string can be parsed as a valid URI. +type DocumentURI = uri.URI + +// URI is a tagging interface for normal non-document URIs. +// +// @since 3.16.0. +type URI = uri.URI + +// EOL lists the recognized end-of-line sequences. +var EOL = []string{"\n", "\r\n", "\r"} + +// Position represents a position in a text document, expressed as a zero-based line and zero-based character offset. +// +// The offsets are based on a UTF-16 string representation. +// So for a string of the form "a𐐀b", the character offset of the character "a" is 0, +// the character offset of "𐐀" is 1 and the character offset of "b" is 3 since 𐐀 is represented using two code +// units in UTF-16. +// +// Positions are line end character agnostic. So you can not specify a position that +// denotes "\r|\n" or "\n|" where "|" represents the character offset. +// +// Position is between two characters like an "insert" cursor in an editor. +// Special values like for example "-1" to denote the end of a line are not supported. +type Position struct { + // Line position in a document (zero-based). + // + // If a line number is greater than the number of lines in a document, it defaults back to the number of lines in + // the document. + // If a line number is negative, it defaults to 0. + Line uint32 `json:"line"` + + // Character offset on a line in a document (zero-based). + // + // Assuming that the line is represented as a string, the Character value represents the gap between the + // "character" and "character + 1". + // + // If the character value is greater than the line length it defaults back to the line length. + // If the character value is negative, it defaults to 0. + Character uint32 `json:"character"` +} + +// Range represents a range in a text document, expressed as (zero-based) start and end positions. +// +// A range is comparable to a selection in an editor. Therefore the end position is exclusive. +// If you want to specify a range that contains a line including the line ending character(s) then use an end position +// denoting the start of the next line. +type Range struct { + // Start is the range's start position. + Start Position `json:"start"` + + // End is the range's end position. + End Position `json:"end"` +} + +// Location represents a location inside a resource, such as a line inside a text file. +type Location struct { + URI DocumentURI `json:"uri"` + Range Range `json:"range"` +} + +// LocationLink represents a link between a source and a target location. +type LocationLink struct { + // OriginSelectionRange span of the origin of this link. + // + // Used as the underlined span for mouse interaction. Defaults to the word range at the mouse position. + OriginSelectionRange *Range `json:"originSelectionRange,omitempty"` + + // TargetURI is the target resource identifier of this link. + TargetURI DocumentURI `json:"targetUri"` + + // TargetRange is the full target range of this link. + // + // If the target for example is a symbol then target range is the range enclosing this symbol not including + // leading/trailing whitespace but everything else like comments. + // + // This information is typically used to highlight the range in the editor. + TargetRange Range `json:"targetRange"` + + // TargetSelectionRange is the range that should be selected and revealed when this link is being followed, + // e.g. the name of a function. + // + // Must be contained by the TargetRange.
See also DocumentSymbol#range + TargetSelectionRange Range `json:"targetSelectionRange"` +} + +// Command represents a reference to a command. Provides a title which will be used to represent a command in the UI. +// +// Commands are identified by a string identifier. +// The recommended way to handle commands is to implement their execution on the server side if the client and +// server provide the corresponding capabilities. +// +// Alternatively the tool extension code could handle the command. The protocol currently doesn't specify +// a set of well-known commands. +type Command struct { + // Title of the command, like `save`. + Title string `json:"title"` + + // Command is the identifier of the actual command handler. + Command string `json:"command"` + + // Arguments that the command handler should be invoked with. + Arguments []interface{} `json:"arguments,omitempty"` +} + +// TextEdit is a textual edit applicable to a text document. +type TextEdit struct { + // Range is the range of the text document to be manipulated. + // + // To insert text into a document create a range where start == end. + Range Range `json:"range"` + + // NewText is the string to be inserted. For delete operations use an + // empty string. + NewText string `json:"newText"` +} + +// ChangeAnnotation is the additional information that describes document changes. +// +// @since 3.16.0. +type ChangeAnnotation struct { + // Label is a human-readable string describing the actual change. + // The string is rendered prominent in the user interface. + Label string `json:"label"` + + // NeedsConfirmation is a flag which indicates that user confirmation is needed + // before applying the change. + NeedsConfirmation bool `json:"needsConfirmation,omitempty"` + + // Description is a human-readable string which is rendered less prominent in + // the user interface. + Description string `json:"description,omitempty"` +} + +// ChangeAnnotationIdentifier is an identifier referring to a change annotation managed by a workspace +// edit. +// +// @since 3.16.0. +type ChangeAnnotationIdentifier string + +// AnnotatedTextEdit is a special text edit with an additional change annotation. +// +// @since 3.16.0. +type AnnotatedTextEdit struct { + TextEdit + + // AnnotationID is the actual annotation identifier. + AnnotationID ChangeAnnotationIdentifier `json:"annotationId"` +} + +// TextDocumentEdit describes textual changes on a single text document. +// +// The TextDocument is referred to as an OptionalVersionedTextDocumentIdentifier to allow clients to check the +// text document version before an edit is applied. +// +// TextDocumentEdit describes all changes on version "Si"; after they are applied, the document moves to +// version "Si+1". +// So the creator of a TextDocumentEdit doesn't need to sort the array or do any kind of ordering. However, the +// edits must be non-overlapping. +type TextDocumentEdit struct { + // TextDocument is the text document to change. + TextDocument OptionalVersionedTextDocumentIdentifier `json:"textDocument"` + + // Edits is the edits to be applied. + // + // @since 3.16.0 - support for AnnotatedTextEdit. + // This is guarded by the client capability Workspace.WorkspaceEdit.ChangeAnnotationSupport. + Edits []TextEdit `json:"edits"` // []TextEdit | []AnnotatedTextEdit +} + +// ResourceOperationKind is the file event type. +type ResourceOperationKind string + +const ( + // CreateResourceOperation supports creating new files and folders.
+ CreateResourceOperation ResourceOperationKind = "create" + + // RenameResourceOperation supports renaming existing files and folders. + RenameResourceOperation ResourceOperationKind = "rename" + + // DeleteResourceOperation supports deleting existing files and folders. + DeleteResourceOperation ResourceOperationKind = "delete" +) + +// CreateFileOptions represents options to create a file. +type CreateFileOptions struct { + // Overwrite existing file. Overwrite wins over `ignoreIfExists`. + Overwrite bool `json:"overwrite,omitempty"` + + // IgnoreIfExists ignores if the file exists. + IgnoreIfExists bool `json:"ignoreIfExists,omitempty"` +} + +// CreateFile represents a create file operation. +type CreateFile struct { + // Kind is a create. + Kind ResourceOperationKind `json:"kind"` // should be `create` + + // URI is the resource to create. + URI DocumentURI `json:"uri"` + + // Options additional options. + Options *CreateFileOptions `json:"options,omitempty"` + + // AnnotationID is an optional annotation identifier describing the operation. + // + // @since 3.16.0. + AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"` +} + +// RenameFileOptions represents rename file options. +type RenameFileOptions struct { + // Overwrite target if existing. Overwrite wins over `ignoreIfExists`. + Overwrite bool `json:"overwrite,omitempty"` + + // IgnoreIfExists ignores if target exists. + IgnoreIfExists bool `json:"ignoreIfExists,omitempty"` +} + +// RenameFile represents a rename file operation. +type RenameFile struct { + // Kind is a rename. + Kind ResourceOperationKind `json:"kind"` // should be `rename` + + // OldURI is the old (existing) location. + OldURI DocumentURI `json:"oldUri"` + + // NewURI is the new location. + NewURI DocumentURI `json:"newUri"` + + // Options rename options. + Options *RenameFileOptions `json:"options,omitempty"` + + // AnnotationID is an optional annotation identifier describing the operation. + // + // @since 3.16.0. + AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"` +} + +// DeleteFileOptions represents delete file options. +type DeleteFileOptions struct { + // Recursive deletes the content recursively if a folder is denoted. + Recursive bool `json:"recursive,omitempty"` + + // IgnoreIfNotExists ignores the operation if the file doesn't exist. + IgnoreIfNotExists bool `json:"ignoreIfNotExists,omitempty"` +} + +// DeleteFile represents a delete file operation. +type DeleteFile struct { + // Kind is a delete. + Kind ResourceOperationKind `json:"kind"` // should be `delete` + + // URI is the file to delete. + URI DocumentURI `json:"uri"` + + // Options delete options. + Options *DeleteFileOptions `json:"options,omitempty"` + + // AnnotationID is an optional annotation identifier describing the operation. + // + // @since 3.16.0. + AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"` +} + +// WorkspaceEdit represents changes to many resources managed in the workspace. +// +// The edit should either provide changes or documentChanges. +// If the client can handle versioned document edits and if documentChanges are present, the latter are preferred over +// changes. +type WorkspaceEdit struct { + // Changes holds changes to existing resources.
+ Changes map[DocumentURI][]TextEdit `json:"changes,omitempty"` + + // DocumentChanges depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes + // are either an array of `TextDocumentEdit`s to express changes to n different text documents + // where each text document edit addresses a specific version of a text document. Or it can contain + // above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations. + // + // Whether a client supports versioned document edits is expressed via + // `workspace.workspaceEdit.documentChanges` client capability. + // + // If a client neither supports `documentChanges` nor `workspace.workspaceEdit.resourceOperations` then + // only plain `TextEdit`s using the `changes` property are supported. + DocumentChanges []TextDocumentEdit `json:"documentChanges,omitempty"` + + // ChangeAnnotations is a map of change annotations that can be referenced in + // "AnnotatedTextEdit"s or create, rename and delete file / folder + // operations. + // + // Whether clients honor this property depends on the client capability + // "workspace.changeAnnotationSupport". + // + // @since 3.16.0. + ChangeAnnotations map[ChangeAnnotationIdentifier]ChangeAnnotation `json:"changeAnnotations,omitempty"` +} + +// TextDocumentIdentifier identifies a text document using a URI. On the protocol level, URIs are passed as strings. +type TextDocumentIdentifier struct { + // URI is the text document's URI. + URI DocumentURI `json:"uri"` +} + +// TextDocumentItem represents an item to transfer a text document from the client to the server. +type TextDocumentItem struct { + // URI is the text document's URI. + URI DocumentURI `json:"uri"` + + // LanguageID is the text document's language identifier. + LanguageID LanguageIdentifier `json:"languageId"` + + // Version is the version number of this document (it will increase after each + // change, including undo/redo). + Version int32 `json:"version"` + + // Text is the content of the opened text document. + Text string `json:"text"` +} + +// LanguageIdentifier represents a text document's language identifier. +type LanguageIdentifier string + +const ( + // ABAPLanguage ABAP Language. + ABAPLanguage LanguageIdentifier = "abap" + + // BatLanguage Windows Bat Language. + BatLanguage LanguageIdentifier = "bat" + + // BibtexLanguage BibTeX Language. + BibtexLanguage LanguageIdentifier = "bibtex" + + // ClojureLanguage Clojure Language. + ClojureLanguage LanguageIdentifier = "clojure" + + // CoffeeScriptLanguage CoffeeScript Language. + CoffeeScriptLanguage LanguageIdentifier = "coffeescript" + + // CLanguage C Language. + CLanguage LanguageIdentifier = "c" + + // CppLanguage C++ Language. + CppLanguage LanguageIdentifier = "cpp" + + // CsharpLanguage C# Language. + CsharpLanguage LanguageIdentifier = "csharp" + + // CSSLanguage CSS Language. + CSSLanguage LanguageIdentifier = "css" + + // DiffLanguage Diff Language. + DiffLanguage LanguageIdentifier = "diff" + + // DartLanguage Dart Language. + DartLanguage LanguageIdentifier = "dart" + + // DockerfileLanguage Dockerfile Language. + DockerfileLanguage LanguageIdentifier = "dockerfile" + + // ElixirLanguage Elixir Language. + ElixirLanguage LanguageIdentifier = "elixir" + + // ErlangLanguage Erlang Language. + ErlangLanguage LanguageIdentifier = "erlang" + + // FsharpLanguage F# Language. + FsharpLanguage LanguageIdentifier = "fsharp" + + // GitCommitLanguage Git Commit Language.
+ GitCommitLanguage LanguageIdentifier = "git-commit" + + // GitRebaseLanguage Git Rebase Language. + GitRebaseLanguage LanguageIdentifier = "git-rebase" + + // GoLanguage Go Language. + GoLanguage LanguageIdentifier = "go" + + // GroovyLanguage Groovy Language. + GroovyLanguage LanguageIdentifier = "groovy" + + // HandlebarsLanguage Handlebars Language. + HandlebarsLanguage LanguageIdentifier = "handlebars" + + // HTMLLanguage HTML Language. + HTMLLanguage LanguageIdentifier = "html" + + // IniLanguage Ini Language. + IniLanguage LanguageIdentifier = "ini" + + // JavaLanguage Java Language. + JavaLanguage LanguageIdentifier = "java" + + // JavaScriptLanguage JavaScript Language. + JavaScriptLanguage LanguageIdentifier = "javascript" + + // JavaScriptReactLanguage JavaScript React Language. + JavaScriptReactLanguage LanguageIdentifier = "javascriptreact" + + // JSONLanguage JSON Language. + JSONLanguage LanguageIdentifier = "json" + + // LatexLanguage LaTeX Language. + LatexLanguage LanguageIdentifier = "latex" + + // LessLanguage Less Language. + LessLanguage LanguageIdentifier = "less" + + // LuaLanguage Lua Language. + LuaLanguage LanguageIdentifier = "lua" + + // MakefileLanguage Makefile Language. + MakefileLanguage LanguageIdentifier = "makefile" + + // MarkdownLanguage Markdown Language. + MarkdownLanguage LanguageIdentifier = "markdown" + + // ObjectiveCLanguage Objective-C Language. + ObjectiveCLanguage LanguageIdentifier = "objective-c" + + // ObjectiveCppLanguage Objective-C++ Language. + ObjectiveCppLanguage LanguageIdentifier = "objective-cpp" + + // PerlLanguage Perl Language. + PerlLanguage LanguageIdentifier = "perl" + + // Perl6Language Perl 6 Language. + Perl6Language LanguageIdentifier = "perl6" + + // PHPLanguage PHP Language. + PHPLanguage LanguageIdentifier = "php" + + // PowershellLanguage Powershell Language. + PowershellLanguage LanguageIdentifier = "powershell" + + // JadeLanguage Pug Language. + JadeLanguage LanguageIdentifier = "jade" + + // PythonLanguage Python Language. + PythonLanguage LanguageIdentifier = "python" + + // RLanguage R Language. + RLanguage LanguageIdentifier = "r" + + // RazorLanguage Razor (cshtml) Language. + RazorLanguage LanguageIdentifier = "razor" + + // RubyLanguage Ruby Language. + RubyLanguage LanguageIdentifier = "ruby" + + // RustLanguage Rust Language. + RustLanguage LanguageIdentifier = "rust" + + // SCSSLanguage SCSS Language (syntax using curly brackets). + SCSSLanguage LanguageIdentifier = "scss" + + // SASSLanguage Sass Language (indented syntax). + SASSLanguage LanguageIdentifier = "sass" + + // ScalaLanguage Scala Language. + ScalaLanguage LanguageIdentifier = "scala" + + // ShaderlabLanguage ShaderLab Language. + ShaderlabLanguage LanguageIdentifier = "shaderlab" + + // ShellscriptLanguage Shell Script (Bash) Language. + ShellscriptLanguage LanguageIdentifier = "shellscript" + + // SQLLanguage SQL Language. + SQLLanguage LanguageIdentifier = "sql" + + // SwiftLanguage Swift Language. + SwiftLanguage LanguageIdentifier = "swift" + + // TypeScriptLanguage TypeScript Language. + TypeScriptLanguage LanguageIdentifier = "typescript" + + // TypeScriptReactLanguage TypeScript React Language. + TypeScriptReactLanguage LanguageIdentifier = "typescriptreact" + + // TeXLanguage TeX Language. + TeXLanguage LanguageIdentifier = "tex" + + // VBLanguage Visual Basic Language. + VBLanguage LanguageIdentifier = "vb" + + // XMLLanguage XML Language. + XMLLanguage LanguageIdentifier = "xml" + + // XslLanguage XSL Language.
+ XslLanguage LanguageIdentifier = "xsl" + + // YamlLanguage YAML Language. + YamlLanguage LanguageIdentifier = "yaml" +) + +// languageIdentifierMap map of LanguageIdentifiers. +var languageIdentifierMap = map[string]LanguageIdentifier{ + "abap": ABAPLanguage, + "bat": BatLanguage, + "bibtex": BibtexLanguage, + "clojure": ClojureLanguage, + "coffeescript": CoffeeScriptLanguage, + "c": CLanguage, + "cpp": CppLanguage, + "csharp": CsharpLanguage, + "css": CSSLanguage, + "diff": DiffLanguage, + "dart": DartLanguage, + "dockerfile": DockerfileLanguage, + "elixir": ElixirLanguage, + "erlang": ErlangLanguage, + "fsharp": FsharpLanguage, + "git-commit": GitCommitLanguage, + "git-rebase": GitRebaseLanguage, + "go": GoLanguage, + "groovy": GroovyLanguage, + "handlebars": HandlebarsLanguage, + "html": HTMLLanguage, + "ini": IniLanguage, + "java": JavaLanguage, + "javascript": JavaScriptLanguage, + "javascriptreact": JavaScriptReactLanguage, + "json": JSONLanguage, + "latex": LatexLanguage, + "less": LessLanguage, + "lua": LuaLanguage, + "makefile": MakefileLanguage, + "markdown": MarkdownLanguage, + "objective-c": ObjectiveCLanguage, + "objective-cpp": ObjectiveCppLanguage, + "perl": PerlLanguage, + "perl6": Perl6Language, + "php": PHPLanguage, + "powershell": PowershellLanguage, + "jade": JadeLanguage, + "python": PythonLanguage, + "r": RLanguage, + "razor": RazorLanguage, + "ruby": RubyLanguage, + "rust": RustLanguage, + "scss": SCSSLanguage, + "sass": SASSLanguage, + "scala": ScalaLanguage, + "shaderlab": ShaderlabLanguage, + "shellscript": ShellscriptLanguage, + "sql": SQLLanguage, + "swift": SwiftLanguage, + "typescript": TypeScriptLanguage, + "typescriptreact": TypeScriptReactLanguage, + "tex": TeXLanguage, + "vb": VBLanguage, + "xml": XMLLanguage, + "xsl": XslLanguage, + "yaml": YamlLanguage, +} + +// ToLanguageIdentifier converts ft to LanguageIdentifier. +func ToLanguageIdentifier(ft string) LanguageIdentifier { + langID, ok := languageIdentifierMap[ft] + if ok { + return langID + } + + return LanguageIdentifier(ft) +} + +// VersionedTextDocumentIdentifier represents an identifier to denote a specific version of a text document. +// +// This information usually flows from the client to the server. +type VersionedTextDocumentIdentifier struct { + TextDocumentIdentifier + + // Version is the version number of this document. + // + // The version number of a document will increase after each change, including + // undo/redo. The number doesn't need to be consecutive. + Version int32 `json:"version"` +} + +// OptionalVersionedTextDocumentIdentifier represents an identifier which optionally denotes a specific version of +// a text document. +// +// This information usually flows from the server to the client. +// +// @since 3.16.0. +type OptionalVersionedTextDocumentIdentifier struct { + TextDocumentIdentifier + + // Version is the version number of this document. If an optional versioned text document + // identifier is sent from the server to the client and the file is not + // open in the editor (the server has not received an open notification + // before) the server can send `null` to indicate that the version is + // known and the content on disk is the master (as specified with document + // content ownership). + // + // The version number of a document will increase after each change, + // including undo/redo. The number doesn't need to be consecutive. 
+ Version *int32 `json:"version"` // int32 | null +} + +// TextDocumentPositionParams is a parameter literal used in requests to pass a text document and a position +// inside that document. +// +// It is up to the client to decide how a selection is converted into a position when issuing a request for a text +// document. +// +// The client can for example honor or ignore the selection direction to make LSP requests consistent with features +// implemented internally. +type TextDocumentPositionParams struct { + // TextDocument is the text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` + + // Position is the position inside the text document. + Position Position `json:"position"` +} + +// DocumentFilter denotes a document through properties like language, scheme or pattern. +// +// An example is a filter that applies to TypeScript files on disk. +type DocumentFilter struct { + // Language a language id, like `typescript`. + Language string `json:"language,omitempty"` + + // Scheme a URI scheme, like `file` or `untitled`. + Scheme string `json:"scheme,omitempty"` + + // Pattern a glob pattern, like `*.{ts,js}`. + // + // Glob patterns can have the following syntax: + // "*" + // "*" to match one or more characters in a path segment + // "?" + // "?" to match on one character in a path segment + // "**" + // "**" to match any number of path segments, including none + // "{}" + // "{}" to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript and JavaScript files) + // "[]" + // "[]" to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …) + // "[!...]" + // "[!...]" to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`) + Pattern string `json:"pattern,omitempty"` +} + +// DocumentSelector is the combination of one or more document filters. +type DocumentSelector []*DocumentFilter + +// MarkupKind describes the content type that a client supports in various +// result literals like `Hover`, `ParameterInfo` or `CompletionItem`. +// +// Please note that `MarkupKinds` must not start with a `$`. These kinds +// are reserved for internal usage. +type MarkupKind string + +const ( + // PlainText is supported as a content format. + PlainText MarkupKind = "plaintext" + + // Markdown is supported as a content format. + Markdown MarkupKind = "markdown" +) + +// MarkupContent represents a string value whose content is interpreted based on its +// kind flag. +// +// Currently the protocol supports `plaintext` and `markdown` as markup kinds. +// +// If the kind is `markdown` then the value can contain fenced code blocks like in GitHub issues. +// See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting +// +// Here is an example of how such a string can be constructed using JavaScript / TypeScript: +// +// let markdown: MarkdownContent = { +// kind: MarkupKind.Markdown, +// value: [ +// '# Header', +// 'Some text', +// '```typescript', +// 'someCode();', +// '```' +// ].join('\n') +// }; +// +// NOTE: clients might sanitize the returned markdown. A client could decide to +// remove HTML from the markdown to avoid script execution.
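+// +// For comparison, a minimal Go sketch of the same value; it assumes nothing beyond the MarkupContent type and the Markdown constant defined in this file: +// +//  content := MarkupContent{ +//  	Kind:  Markdown, +//  	Value: "# Header\nSome text\n```typescript\nsomeCode();\n```", +//  } +// +// Clients that only advertise plain text support would be sent Kind: PlainText instead.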
+type MarkupContent struct { + // Kind is the type of the Markup. + Kind MarkupKind `json:"kind"` + + // Value is the content itself. + Value string `json:"value"` +} diff --git a/vendor/go.lsp.dev/protocol/callhierarchy.go b/vendor/go.lsp.dev/protocol/callhierarchy.go new file mode 100644 index 00000000000..eebb9e39787 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/callhierarchy.go @@ -0,0 +1,103 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +// CallHierarchy capabilities specific to the "textDocument/callHierarchy". +// +// @since 3.16.0. +type CallHierarchy struct { + // DynamicRegistration whether implementation supports dynamic registration. + // + // If this is set to "true" the client supports the new + // (TextDocumentRegistrationOptions & StaticRegistrationOptions) return + // value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// CallHierarchyPrepareParams params of CallHierarchyPrepare. +// +// @since 3.16.0. +type CallHierarchyPrepareParams struct { + TextDocumentPositionParams + WorkDoneProgressParams +} + +// CallHierarchyItem is the result of a "textDocument/prepareCallHierarchy" request. +// +// @since 3.16.0. +type CallHierarchyItem struct { + // Name is the name of this item. + Name string `json:"name"` + + // Kind is the kind of this item. + Kind SymbolKind `json:"kind"` + + // Tags for this item. + Tags []SymbolTag `json:"tags,omitempty"` + + // Detail more detail for this item, e.g. the signature of a function. + Detail string `json:"detail,omitempty"` + + // URI is the resource identifier of this item. + URI DocumentURI `json:"uri"` + + // Range is the range enclosing this symbol not including leading/trailing whitespace + // but everything else, e.g. comments and code. + Range Range `json:"range"` + + // SelectionRange is the range that should be selected and revealed when this symbol is being + // picked, e.g. the name of a function. Must be contained by the + // Range. + SelectionRange Range `json:"selectionRange"` + + // Data is a data entry field that is preserved between a call hierarchy prepare and + // incoming calls or outgoing calls requests. + Data interface{} `json:"data,omitempty"` +} + +// CallHierarchyIncomingCallsParams params of CallHierarchyIncomingCalls. +// +// @since 3.16.0. +type CallHierarchyIncomingCallsParams struct { + WorkDoneProgressParams + PartialResultParams + + // Item is the IncomingCalls item. + Item CallHierarchyItem `json:"item"` +} + +// CallHierarchyIncomingCall is the result of a "callHierarchy/incomingCalls" request. +// +// @since 3.16.0. +type CallHierarchyIncomingCall struct { + // From is the item that makes the call. + From CallHierarchyItem `json:"from"` + + // FromRanges is the ranges at which the calls appear. This is relative to the caller + // denoted by From. + FromRanges []Range `json:"fromRanges"` +} + +// CallHierarchyOutgoingCallsParams params of CallHierarchyOutgoingCalls. +// +// @since 3.16.0. +type CallHierarchyOutgoingCallsParams struct { + WorkDoneProgressParams + PartialResultParams + + // Item is the OutgoingCalls item. + Item CallHierarchyItem `json:"item"` +} + +// CallHierarchyOutgoingCall is the result of a "callHierarchy/outgoingCalls" request. +// +// @since 3.16.0. +type CallHierarchyOutgoingCall struct { + // To is the item that is called. + To CallHierarchyItem `json:"to"` + + // FromRanges is the range at which this item is called.
This is the range relative to + // the caller, e.g. the item passed to "callHierarchy/outgoingCalls" request. + FromRanges []Range `json:"fromRanges"` +} diff --git a/vendor/go.lsp.dev/protocol/capabilities_client.go b/vendor/go.lsp.dev/protocol/capabilities_client.go new file mode 100644 index 00000000000..2d6bb74e3dc --- /dev/null +++ b/vendor/go.lsp.dev/protocol/capabilities_client.go @@ -0,0 +1,1061 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import "strconv" + +// ClientCapabilities defines the capabilities for dynamic registration, workspace and text document features +// the client supports. +// +// The experimental can be used to pass experimental capabilities under development. +// +// For future compatibility a ClientCapabilities object literal can have more properties set than currently defined. +// Servers receiving a ClientCapabilities object literal with unknown properties should ignore these properties. +// +// A missing property should be interpreted as an absence of the capability. +// If a missing property normally defines sub properties, all missing sub properties should be interpreted +// as an absence of the corresponding capability. +type ClientCapabilities struct { + // Workspace specific client capabilities. + Workspace *WorkspaceClientCapabilities `json:"workspace,omitempty"` + + // TextDocument specific client capabilities. + TextDocument *TextDocumentClientCapabilities `json:"textDocument,omitempty"` + + // Window specific client capabilities. + Window *WindowClientCapabilities `json:"window,omitempty"` + + // General client capabilities. + // + // @since 3.16.0. + General *GeneralClientCapabilities `json:"general,omitempty"` + + // Experimental client capabilities. + Experimental interface{} `json:"experimental,omitempty"` +} + +// WorkspaceClientCapabilities Workspace specific client capabilities. +type WorkspaceClientCapabilities struct { + // ApplyEdit whether the client supports applying batch edits to the workspace by supporting + // the request "workspace/applyEdit". + ApplyEdit bool `json:"applyEdit,omitempty"` + + // WorkspaceEdit capabilities specific to `WorkspaceEdit`s. + WorkspaceEdit *WorkspaceClientCapabilitiesWorkspaceEdit `json:"workspaceEdit,omitempty"` + + // DidChangeConfiguration capabilities specific to the `workspace/didChangeConfiguration` notification. + DidChangeConfiguration *DidChangeConfigurationWorkspaceClientCapabilities `json:"didChangeConfiguration,omitempty"` + + // DidChangeWatchedFiles capabilities specific to the `workspace/didChangeWatchedFiles` notification. + DidChangeWatchedFiles *DidChangeWatchedFilesWorkspaceClientCapabilities `json:"didChangeWatchedFiles,omitempty"` + + // Symbol capabilities specific to the "workspace/symbol" request. + Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"` + + // ExecuteCommand capabilities specific to the "workspace/executeCommand" request. + ExecuteCommand *ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"` + + // WorkspaceFolders is the client has support for workspace folders. + // + // @since 3.6.0. + WorkspaceFolders bool `json:"workspaceFolders,omitempty"` + + // Configuration is the client supports "workspace/configuration" requests. + // + // @since 3.6.0. + Configuration bool `json:"configuration,omitempty"` + + // SemanticTokens is the capabilities specific to the semantic token requests scoped to the + // workspace. + // + // @since 3.16.0.
+ SemanticTokens *SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"` + + // CodeLens is the capabilities specific to the code lens requests scoped to the + // workspace. + // + // @since 3.16.0. + CodeLens *CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"` + + // FileOperations is the client has support for file requests/notifications. + // + // @since 3.16.0. + FileOperations *WorkspaceClientCapabilitiesFileOperations `json:"fileOperations,omitempty"` +} + +// WorkspaceClientCapabilitiesWorkspaceEdit capabilities specific to "WorkspaceEdit"s. +type WorkspaceClientCapabilitiesWorkspaceEdit struct { + // DocumentChanges is the client supports versioned document changes in `WorkspaceEdit`s. + DocumentChanges bool `json:"documentChanges,omitempty"` + + // FailureHandling is the failure handling strategy of a client if applying the workspace edit + // fails. + // + // Mostly FailureHandlingKind. + FailureHandling string `json:"failureHandling,omitempty"` + + // ResourceOperations is the resource operations the client supports. Clients should at least + // support "create", "rename" and "delete" files and folders. + ResourceOperations []string `json:"resourceOperations,omitempty"` + + // NormalizesLineEndings whether the client normalizes line endings to the client specific + // setting. + // If set to `true` the client will normalize line ending characters + // in a workspace edit to the client specific new line character(s). + // + // @since 3.16.0. + NormalizesLineEndings bool `json:"normalizesLineEndings,omitempty"` + + // ChangeAnnotationSupport whether the client in general supports change annotations on text edits, + // create file, rename file and delete file changes. + // + // @since 3.16.0. + ChangeAnnotationSupport *WorkspaceClientCapabilitiesWorkspaceEditChangeAnnotationSupport `json:"changeAnnotationSupport,omitempty"` +} + +// FailureHandlingKind is the kind of failure handling. +type FailureHandlingKind string + +const ( + // FailureHandlingKindAbort applying the workspace change is simply aborted if one of the changes provided + // fails. All operations executed before the failing operation stay executed. + FailureHandlingKindAbort FailureHandlingKind = "abort" + + // FailureHandlingKindTransactional all operations are executed transactionally. That means they either all + // succeed or no changes at all are applied to the workspace. + FailureHandlingKindTransactional FailureHandlingKind = "transactional" + + // FailureHandlingKindTextOnlyTransactional if the workspace edit contains only textual file changes they are executed transactionally. + // If resource changes (create, rename or delete file) are part of the change the failure + // handling strategy is abort. + FailureHandlingKindTextOnlyTransactional FailureHandlingKind = "textOnlyTransactional" + + // FailureHandlingKindUndo the client tries to undo the operations already executed, but there is no + // guarantee that this succeeds. + FailureHandlingKindUndo FailureHandlingKind = "undo" +) + +// WorkspaceClientCapabilitiesWorkspaceEditChangeAnnotationSupport is the ChangeAnnotationSupport of WorkspaceClientCapabilitiesWorkspaceEdit. +// +// @since 3.16.0. +type WorkspaceClientCapabilitiesWorkspaceEditChangeAnnotationSupport struct { + // GroupsOnLabel whether the client groups edits with equal labels into tree nodes, + // for instance all edits labeled with "Changes in Strings" would + // be a tree node.
+ GroupsOnLabel bool `json:"groupsOnLabel,omitempty"` +} + +// DidChangeConfigurationWorkspaceClientCapabilities capabilities specific to the "workspace/didChangeConfiguration" notification. +// +// @since 3.16.0. +type DidChangeConfigurationWorkspaceClientCapabilities struct { + // DynamicRegistration whether the did change configuration notification supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// DidChangeWatchedFilesWorkspaceClientCapabilities capabilities specific to the "workspace/didChangeWatchedFiles" notification. +// +// @since 3.16.0. +type DidChangeWatchedFilesWorkspaceClientCapabilities struct { + // DynamicRegistration whether the did change watched files notification supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// WorkspaceSymbolClientCapabilities capabilities specific to the `workspace/symbol` request. +// +// The workspace symbol request is sent from the client to the server to +// list project-wide symbols matching the query string. +type WorkspaceSymbolClientCapabilities struct { + // DynamicRegistration is the Symbol request supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // SymbolKind is the specific capabilities for the SymbolKind in the "workspace/symbol" request. + SymbolKind *SymbolKindCapabilities `json:"symbolKind,omitempty"` + + // TagSupport is the client supports tags on `SymbolInformation`. + // Clients supporting tags have to handle unknown tags gracefully. + // + // @since 3.16.0 + TagSupport *TagSupportCapabilities `json:"tagSupport,omitempty"` +} + +// SymbolKindCapabilities specific capabilities for the SymbolKind. +type SymbolKindCapabilities struct { + // ValueSet is the symbol kind values the client supports. When this + // property exists the client also guarantees that it will + // handle values outside its set gracefully and falls back + // to a default value when unknown. + // + // If this property is not present the client only supports + // the symbol kinds from `File` to `Array` as defined in + // the initial version of the protocol. + ValueSet []SymbolKind `json:"valueSet,omitempty"` +} + +// TagSupportCapabilities specific capabilities for the TagSupport. +type TagSupportCapabilities struct { + // ValueSet is the tags supported by the client. + ValueSet []SymbolTag `json:"valueSet,omitempty"` +} + +// ExecuteCommandClientCapabilities capabilities specific to the "workspace/executeCommand" request. +type ExecuteCommandClientCapabilities struct { + // DynamicRegistration whether execute command supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// SemanticTokensWorkspaceClientCapabilities capabilities specific to the "workspace/semanticToken" request. +// +// @since 3.16.0. +type SemanticTokensWorkspaceClientCapabilities struct { + // RefreshSupport whether the client implementation supports a refresh request sent from + // the server to the client. + // + // Note that this event is global and will force the client to refresh all + // semantic tokens currently shown. It should be used with absolute care + // and is useful for situations where a server, for example, detects a project-wide + // change that requires such a calculation. + RefreshSupport bool `json:"refreshSupport,omitempty"` +} + +// CodeLensWorkspaceClientCapabilities capabilities specific to the "workspace/codeLens" request. +// +// @since 3.16.0.
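+// +// A minimal sketch of a client advertising this capability; it assumes nothing beyond the RefreshSupport field defined below: +// +//  caps := CodeLensWorkspaceClientCapabilities{RefreshSupport: true}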
+type CodeLensWorkspaceClientCapabilities struct { + // RefreshSupport whether the client implementation supports a refresh request sent from the + // server to the client. + // + // Note that this event is global and will force the client to refresh all + // code lenses currently shown. It should be used with absolute care and is + // useful for situations where a server, for example, detects a project-wide + // change that requires such a calculation. + RefreshSupport bool `json:"refreshSupport,omitempty"` +} + +// WorkspaceClientCapabilitiesFileOperations capabilities specific to the fileOperations. +// +// @since 3.16.0. +type WorkspaceClientCapabilitiesFileOperations struct { + // DynamicRegistration whether the client supports dynamic registration for file + // requests/notifications. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // DidCreate is the client has support for sending didCreateFiles notifications. + DidCreate bool `json:"didCreate,omitempty"` + + // WillCreate is the client has support for sending willCreateFiles requests. + WillCreate bool `json:"willCreate,omitempty"` + + // DidRename is the client has support for sending didRenameFiles notifications. + DidRename bool `json:"didRename,omitempty"` + + // WillRename is the client has support for sending willRenameFiles requests. + WillRename bool `json:"willRename,omitempty"` + + // DidDelete is the client has support for sending didDeleteFiles notifications. + DidDelete bool `json:"didDelete,omitempty"` + + // WillDelete is the client has support for sending willDeleteFiles requests. + WillDelete bool `json:"willDelete,omitempty"` +} + +// TextDocumentClientCapabilities Text document specific client capabilities. +type TextDocumentClientCapabilities struct { + // Synchronization defines which synchronization capabilities the client supports. + Synchronization *TextDocumentSyncClientCapabilities `json:"synchronization,omitempty"` + + // Completion Capabilities specific to the "textDocument/completion". + Completion *CompletionTextDocumentClientCapabilities `json:"completion,omitempty"` + + // Hover capabilities specific to the "textDocument/hover". + Hover *HoverTextDocumentClientCapabilities `json:"hover,omitempty"` + + // SignatureHelp capabilities specific to the "textDocument/signatureHelp". + SignatureHelp *SignatureHelpTextDocumentClientCapabilities `json:"signatureHelp,omitempty"` + + // Declaration capabilities specific to the "textDocument/declaration". + Declaration *DeclarationTextDocumentClientCapabilities `json:"declaration,omitempty"` + + // Definition capabilities specific to the "textDocument/definition". + // + // @since 3.14.0. + Definition *DefinitionTextDocumentClientCapabilities `json:"definition,omitempty"` + + // TypeDefinition capabilities specific to the "textDocument/typeDefinition". + // + // @since 3.6.0. + TypeDefinition *TypeDefinitionTextDocumentClientCapabilities `json:"typeDefinition,omitempty"` + + // Implementation capabilities specific to the "textDocument/implementation". + // + // @since 3.6.0. + Implementation *ImplementationTextDocumentClientCapabilities `json:"implementation,omitempty"` + + // References capabilities specific to the "textDocument/references". + References *ReferencesTextDocumentClientCapabilities `json:"references,omitempty"` + + // DocumentHighlight capabilities specific to the "textDocument/documentHighlight".
+ DocumentHighlight *DocumentHighlightClientCapabilities `json:"documentHighlight,omitempty"` + + // DocumentSymbol capabilities specific to the "textDocument/documentSymbol". + DocumentSymbol *DocumentSymbolClientCapabilities `json:"documentSymbol,omitempty"` + + // CodeAction capabilities specific to the "textDocument/codeAction". + CodeAction *CodeActionClientCapabilities `json:"codeAction,omitempty"` + + // CodeLens capabilities specific to the "textDocument/codeLens". + CodeLens *CodeLensClientCapabilities `json:"codeLens,omitempty"` + + // DocumentLink capabilities specific to the "textDocument/documentLink". + DocumentLink *DocumentLinkClientCapabilities `json:"documentLink,omitempty"` + + // ColorProvider capabilities specific to the "textDocument/documentColor" and the + // "textDocument/colorPresentation" request. + // + // @since 3.6.0. + ColorProvider *DocumentColorClientCapabilities `json:"colorProvider,omitempty"` + + // Formatting Capabilities specific to the "textDocument/formatting" request. + Formatting *DocumentFormattingClientCapabilities `json:"formatting,omitempty"` + + // RangeFormatting Capabilities specific to the "textDocument/rangeFormatting" request. + RangeFormatting *DocumentRangeFormattingClientCapabilities `json:"rangeFormatting,omitempty"` + + // OnTypeFormatting Capabilities specific to the "textDocument/onTypeFormatting" request. + OnTypeFormatting *DocumentOnTypeFormattingClientCapabilities `json:"onTypeFormatting,omitempty"` + + // PublishDiagnostics capabilities specific to "textDocument/publishDiagnostics". + PublishDiagnostics *PublishDiagnosticsClientCapabilities `json:"publishDiagnostics,omitempty"` + + // Rename capabilities specific to the "textDocument/rename". + Rename *RenameClientCapabilities `json:"rename,omitempty"` + + // FoldingRange capabilities specific to "textDocument/foldingRange" requests. + // + // @since 3.10.0. + FoldingRange *FoldingRangeClientCapabilities `json:"foldingRange,omitempty"` + + // SelectionRange capabilities specific to "textDocument/selectionRange" requests. + // + // @since 3.15.0. + SelectionRange *SelectionRangeClientCapabilities `json:"selectionRange,omitempty"` + + // CallHierarchy capabilities specific to the various call hierarchy requests. + // + // @since 3.16.0. + CallHierarchy *CallHierarchyClientCapabilities `json:"callHierarchy,omitempty"` + + // SemanticTokens capabilities specific to the various semantic token requests. + // + // @since 3.16.0. + SemanticTokens *SemanticTokensClientCapabilities `json:"semanticTokens,omitempty"` + + // LinkedEditingRange capabilities specific to the "textDocument/linkedEditingRange" request. + // + // @since 3.16.0. + LinkedEditingRange *LinkedEditingRangeClientCapabilities `json:"linkedEditingRange,omitempty"` + + // Moniker capabilities specific to the "textDocument/moniker" request. + // + // @since 3.16.0. + Moniker *MonikerClientCapabilities `json:"moniker,omitempty"` +} + +// TextDocumentSyncClientCapabilities defines which synchronization capabilities the client supports. +type TextDocumentSyncClientCapabilities struct { + // DynamicRegistration whether text document synchronization supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // WillSave is the client supports sending will save notifications. 
+ WillSave bool `json:"willSave,omitempty"` + + // WillSaveWaitUntil is the client supports sending a will save request and + // waits for a response providing text edits which will + // be applied to the document before it is saved. + WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"` + + // DidSave is the client supports did save notifications. + DidSave bool `json:"didSave,omitempty"` +} + +// CompletionTextDocumentClientCapabilities Capabilities specific to the "textDocument/completion". +type CompletionTextDocumentClientCapabilities struct { + // DynamicRegistration whether completion supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // CompletionItem is the client supports the following `CompletionItem` specific + // capabilities. + CompletionItem *CompletionTextDocumentClientCapabilitiesItem `json:"completionItem,omitempty"` + + // CompletionItemKind specific capabilities for the `CompletionItemKind`. + CompletionItemKind *CompletionTextDocumentClientCapabilitiesItemKind `json:"completionItemKind,omitempty"` + + // ContextSupport is the client supports sending additional context information for a + // `textDocument/completion` request. + ContextSupport bool `json:"contextSupport,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItem is the client supports the following "CompletionItem" specific +// capabilities. +type CompletionTextDocumentClientCapabilitiesItem struct { + // SnippetSupport client supports snippets as insert text. + // + // A snippet can define tab stops and placeholders with `$1`, `$2` + // and `${3:foo}`. `$0` defines the final tab stop, it defaults to + // the end of the snippet. Placeholders with equal identifiers are linked, + // that is typing in one will update others too. + SnippetSupport bool `json:"snippetSupport,omitempty"` + + // CommitCharactersSupport client supports commit characters on a completion item. + CommitCharactersSupport bool `json:"commitCharactersSupport,omitempty"` + + // DocumentationFormat client supports the following content formats for the documentation + // property. The order describes the preferred format of the client. + DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"` + + // DeprecatedSupport client supports the deprecated property on a completion item. + DeprecatedSupport bool `json:"deprecatedSupport,omitempty"` + + // PreselectSupport client supports the preselect property on a completion item. + PreselectSupport bool `json:"preselectSupport,omitempty"` + + // TagSupport is the client supports the tag property on a completion item. + // + // Clients supporting tags have to handle unknown tags gracefully. + // Clients especially need to preserve unknown tags when sending + // a completion item back to the server in a resolve call. + // + // @since 3.15.0. + TagSupport *CompletionTextDocumentClientCapabilitiesItemTagSupport `json:"tagSupport,omitempty"` + + // InsertReplaceSupport client supports insert replace edit to control different behavior if + // a completion item is inserted in the text or should replace text. + // + // @since 3.16.0. + InsertReplaceSupport bool `json:"insertReplaceSupport,omitempty"` + + // ResolveSupport indicates which properties a client can resolve lazily on a + // completion item. Before version 3.16.0 only the predefined properties + // `documentation` and `detail` could be resolved lazily. + // + // @since 3.16.0.
+ ResolveSupport *CompletionTextDocumentClientCapabilitiesItemResolveSupport `json:"resolveSupport,omitempty"` + + // InsertTextModeSupport is the client supports the `insertTextMode` property on + // a completion item to override the whitespace handling mode + // as defined by the client (see `insertTextMode`). + // + // @since 3.16.0. + InsertTextModeSupport *CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport `json:"insertTextModeSupport,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItemTagSupport specific capabilities for the "TagSupport" in the "textDocument/completion" request. +// +// @since 3.15.0. +type CompletionTextDocumentClientCapabilitiesItemTagSupport struct { + // ValueSet is the tags supported by the client. + // + // @since 3.15.0. + ValueSet []CompletionItemTag `json:"valueSet,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItemResolveSupport specific capabilities for the ResolveSupport in the CompletionTextDocumentClientCapabilitiesItem. +// +// @since 3.16.0. +type CompletionTextDocumentClientCapabilitiesItemResolveSupport struct { + // Properties is the properties that a client can resolve lazily. + Properties []string `json:"properties"` +} + +// CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport specific capabilities for the InsertTextModeSupport in the CompletionTextDocumentClientCapabilitiesItem. +// +// @since 3.16.0. +type CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport struct { + // ValueSet is the insert text modes supported by the client. + // + // @since 3.16.0. + ValueSet []InsertTextMode `json:"valueSet,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItemKind specific capabilities for the "CompletionItemKind" in the "textDocument/completion" request. +type CompletionTextDocumentClientCapabilitiesItemKind struct { + // ValueSet is the completion item kind values the client supports. When this + // property exists the client also guarantees that it will + // handle values outside its set gracefully and falls back + // to a default value when unknown. + // + // If this property is not present the client only supports + // the completion item kinds from `Text` to `Reference` as defined in + // the initial version of the protocol. + // + ValueSet []CompletionItemKind `json:"valueSet,omitempty"` +} + +// HoverTextDocumentClientCapabilities capabilities specific to the "textDocument/hover". +type HoverTextDocumentClientCapabilities struct { + // DynamicRegistration whether hover supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // ContentFormat is the client supports the following content formats for the content + // property. The order describes the preferred format of the client. + ContentFormat []MarkupKind `json:"contentFormat,omitempty"` +} + +// SignatureHelpTextDocumentClientCapabilities capabilities specific to the "textDocument/signatureHelp". +type SignatureHelpTextDocumentClientCapabilities struct { + // DynamicRegistration whether signature help supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // SignatureInformation is the client supports the following "SignatureInformation" + // specific properties. + SignatureInformation *TextDocumentClientCapabilitiesSignatureInformation `json:"signatureInformation,omitempty"` + + // ContextSupport is the client supports sending additional context information for a "textDocument/signatureHelp" request.
+ // + // A client that opts into contextSupport will also support the "retriggerCharacters" on "SignatureHelpOptions". + // + // @since 3.15.0. + ContextSupport bool `json:"contextSupport,omitempty"` +} + +// TextDocumentClientCapabilitiesSignatureInformation is the client supports the following "SignatureInformation" +// specific properties. +type TextDocumentClientCapabilitiesSignatureInformation struct { + // DocumentationFormat is the client supports the following content formats for the documentation + // property. The order describes the preferred format of the client. + DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"` + + // ParameterInformation is the client capabilities specific to parameter information. + ParameterInformation *TextDocumentClientCapabilitiesParameterInformation `json:"parameterInformation,omitempty"` + + // ActiveParameterSupport is the client supports the `activeParameter` property on + // `SignatureInformation` literal. + // + // @since 3.16.0. + ActiveParameterSupport bool `json:"activeParameterSupport,omitempty"` +} + +// TextDocumentClientCapabilitiesParameterInformation is the client capabilities specific to parameter information. +type TextDocumentClientCapabilitiesParameterInformation struct { + // LabelOffsetSupport is the client supports processing label offsets instead of a + // simple label string. + // + // @since 3.14.0. + LabelOffsetSupport bool `json:"labelOffsetSupport,omitempty"` +} + +// DeclarationTextDocumentClientCapabilities capabilities specific to the "textDocument/declaration". +type DeclarationTextDocumentClientCapabilities struct { + // DynamicRegistration whether declaration supports dynamic registration. If this is set to `true` + // the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + // return value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of declaration links. + // + // @since 3.14.0. + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// DefinitionTextDocumentClientCapabilities capabilities specific to the "textDocument/definition". +// +// @since 3.14.0. +type DefinitionTextDocumentClientCapabilities struct { + // DynamicRegistration whether definition supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of definition links. + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// TypeDefinitionTextDocumentClientCapabilities capabilities specific to the "textDocument/typeDefinition". +// +// @since 3.6.0. +type TypeDefinitionTextDocumentClientCapabilities struct { + // DynamicRegistration whether typeDefinition supports dynamic registration. If this is set to `true` + // the client supports the new "(TextDocumentRegistrationOptions & StaticRegistrationOptions)" + // return value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of definition links. + // + // @since 3.14.0 + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// ImplementationTextDocumentClientCapabilities capabilities specific to the "textDocument/implementation". +// +// @since 3.6.0.
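+// +// A minimal sketch of a client advertising this capability; it assumes nothing beyond the two fields defined below: +// +//  caps := ImplementationTextDocumentClientCapabilities{ +//  	DynamicRegistration: true, +//  	LinkSupport:         true, +//  }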
+type ImplementationTextDocumentClientCapabilities struct { + // DynamicRegistration whether implementation supports dynamic registration. If this is set to `true` + // the client supports the new "(TextDocumentRegistrationOptions & StaticRegistrationOptions)" + // return value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of definition links. + // + // @since 3.14.0 + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// ReferencesTextDocumentClientCapabilities capabilities specific to the "textDocument/references". +type ReferencesTextDocumentClientCapabilities struct { + // DynamicRegistration whether references supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// DocumentHighlightClientCapabilities capabilities specific to the "textDocument/documentHighlight". +type DocumentHighlightClientCapabilities struct { + // DynamicRegistration Whether document highlight supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// DocumentSymbolClientCapabilities capabilities specific to the "textDocument/documentSymbol". +type DocumentSymbolClientCapabilities struct { + // DynamicRegistration whether document symbol supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // SymbolKind specific capabilities for the "SymbolKind". + SymbolKind *SymbolKindCapabilities `json:"symbolKind,omitempty"` + + // HierarchicalDocumentSymbolSupport is the client supports hierarchical document symbols. + HierarchicalDocumentSymbolSupport bool `json:"hierarchicalDocumentSymbolSupport,omitempty"` + + // TagSupport is the client supports tags on "SymbolInformation". Tags are supported on + // "DocumentSymbol" if "HierarchicalDocumentSymbolSupport" is set to true. + // Clients supporting tags have to handle unknown tags gracefully. + // + // @since 3.16.0. + TagSupport *DocumentSymbolClientCapabilitiesTagSupport `json:"tagSupport,omitempty"` + + // LabelSupport is the client supports an additional label presented in the UI when + // registering a document symbol provider. + // + // @since 3.16.0. + LabelSupport bool `json:"labelSupport,omitempty"` +} + +// DocumentSymbolClientCapabilitiesTagSupport TagSupport in the DocumentSymbolClientCapabilities. +// +// @since 3.16.0. +type DocumentSymbolClientCapabilitiesTagSupport struct { + // ValueSet is the tags supported by the client. + ValueSet []SymbolTag `json:"valueSet"` +} + +// CodeActionClientCapabilities capabilities specific to the "textDocument/codeAction". +type CodeActionClientCapabilities struct { + // DynamicRegistration whether code action supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // CodeActionLiteralSupport is the client supports code action literals as a valid + // response of the "textDocument/codeAction" request. + // + // @since 3.8.0 + CodeActionLiteralSupport *CodeActionClientCapabilitiesLiteralSupport `json:"codeActionLiteralSupport,omitempty"` + + // IsPreferredSupport whether code action supports the "isPreferred" property. + // + // @since 3.15.0. + IsPreferredSupport bool `json:"isPreferredSupport,omitempty"` + + // DisabledSupport whether code action supports the `disabled` property. + // + // @since 3.16.0.
+	DisabledSupport bool `json:"disabledSupport,omitempty"`
+
+	// DataSupport whether code action supports the `data` property which is
+	// preserved between a `textDocument/codeAction` and a
+	// `codeAction/resolve` request.
+	//
+	// @since 3.16.0.
+	DataSupport bool `json:"dataSupport,omitempty"`
+
+	// ResolveSupport whether the client supports resolving additional code action
+	// properties via a separate `codeAction/resolve` request.
+	//
+	// @since 3.16.0.
+	ResolveSupport *CodeActionClientCapabilitiesResolveSupport `json:"resolveSupport,omitempty"`
+
+	// HonorsChangeAnnotations whether the client honors the change annotations in
+	// text edits and resource operations returned via the
+	// `CodeAction#edit` property by for example presenting
+	// the workspace edit in the user interface and asking
+	// for confirmation.
+	//
+	// @since 3.16.0.
+	HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
+}
+
+// CodeActionClientCapabilitiesLiteralSupport is the client supports code action literals as a valid response of the "textDocument/codeAction" request.
+type CodeActionClientCapabilitiesLiteralSupport struct {
+	// CodeActionKind is the code action kind is supported with the following value
+	// set.
+	CodeActionKind *CodeActionClientCapabilitiesKind `json:"codeActionKind"`
+}
+
+// CodeActionClientCapabilitiesKind is the code action kind is supported with the following value set.
+type CodeActionClientCapabilitiesKind struct {
+	// ValueSet is the code action kind values the client supports. When this
+	// property exists the client also guarantees that it will
+	// handle values outside its set gracefully and falls back
+	// to a default value when unknown.
+	ValueSet []CodeActionKind `json:"valueSet"`
+}
+
+// CodeActionClientCapabilitiesResolveSupport ResolveSupport in the CodeActionClientCapabilities.
+//
+// @since 3.16.0.
+type CodeActionClientCapabilitiesResolveSupport struct {
+	// Properties is the properties that a client can resolve lazily.
+	Properties []string `json:"properties"`
+}
+
+// CodeLensClientCapabilities capabilities specific to the "textDocument/codeLens".
+type CodeLensClientCapabilities struct {
+	// DynamicRegistration whether code lens supports dynamic registration.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentLinkClientCapabilities capabilities specific to the "textDocument/documentLink".
+type DocumentLinkClientCapabilities struct {
+	// DynamicRegistration whether document link supports dynamic registration.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+	// TooltipSupport whether the client supports the "tooltip" property on "DocumentLink".
+	//
+	// @since 3.15.0.
+	TooltipSupport bool `json:"tooltipSupport,omitempty"`
+}
+
+// DocumentColorClientCapabilities capabilities specific to the "textDocument/documentColor" and the
+// "textDocument/colorPresentation" request.
+//
+// @since 3.6.0.
+type DocumentColorClientCapabilities struct {
+	// DynamicRegistration whether colorProvider supports dynamic registration. If this is set to `true`
+	// the client supports the new "(ColorProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)"
+	// return value for the corresponding server capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentFormattingClientCapabilities capabilities specific to the "textDocument/formatting".
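+//
+// A minimal sketch (illustrative, not part of the upstream file): a client
+// enabling dynamic registration for formatting sets
+//
+//	caps := DocumentFormattingClientCapabilities{DynamicRegistration: true}
+//
+// which marshals to {"dynamicRegistration":true} on the wire.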
+type DocumentFormattingClientCapabilities struct {
+	// DynamicRegistration whether document formatting supports dynamic registration.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentRangeFormattingClientCapabilities capabilities specific to the "textDocument/rangeFormatting".
+type DocumentRangeFormattingClientCapabilities struct {
+	// DynamicRegistration whether document range formatting supports dynamic registration.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentOnTypeFormattingClientCapabilities capabilities specific to the "textDocument/onTypeFormatting".
+type DocumentOnTypeFormattingClientCapabilities struct {
+	// DynamicRegistration whether on type formatting supports dynamic registration.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// PublishDiagnosticsClientCapabilities capabilities specific to "textDocument/publishDiagnostics".
+type PublishDiagnosticsClientCapabilities struct {
+	// RelatedInformation whether the client accepts diagnostics with related information.
+	RelatedInformation bool `json:"relatedInformation,omitempty"`
+
+	// TagSupport clients supporting tags have to handle unknown tags gracefully.
+	//
+	// @since 3.15.0.
+	TagSupport *PublishDiagnosticsClientCapabilitiesTagSupport `json:"tagSupport,omitempty"`
+
+	// VersionSupport whether the client interprets the version property of the
+	// "textDocument/publishDiagnostics" notification's parameter.
+	//
+	// @since 3.15.0.
+	VersionSupport bool `json:"versionSupport,omitempty"`
+
+	// CodeDescriptionSupport client supports a codeDescription property.
+	//
+	// @since 3.16.0.
+	CodeDescriptionSupport bool `json:"codeDescriptionSupport,omitempty"`
+
+	// DataSupport whether code action supports the `data` property which is
+	// preserved between a `textDocument/publishDiagnostics` and
+	// `textDocument/codeAction` request.
+	//
+	// @since 3.16.0.
+	DataSupport bool `json:"dataSupport,omitempty"`
+}
+
+// PublishDiagnosticsClientCapabilitiesTagSupport is the client capability of TagSupport.
+//
+// @since 3.15.0.
+type PublishDiagnosticsClientCapabilitiesTagSupport struct {
+	// ValueSet is the tags supported by the client.
+	ValueSet []DiagnosticTag `json:"valueSet"`
+}
+
+// RenameClientCapabilities capabilities specific to the "textDocument/rename".
+type RenameClientCapabilities struct {
+	// DynamicRegistration whether rename supports dynamic registration.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+	// PrepareSupport is the client supports testing for validity of rename operations
+	// before execution.
+	PrepareSupport bool `json:"prepareSupport,omitempty"`
+
+	// PrepareSupportDefaultBehavior client supports the default behavior result
+	// (`{ defaultBehavior: boolean }`).
+	//
+	// The value indicates the default behavior used by the
+	// client.
+	//
+	// @since 3.16.0.
+	PrepareSupportDefaultBehavior PrepareSupportDefaultBehavior `json:"prepareSupportDefaultBehavior,omitempty"`
+
+	// HonorsChangeAnnotations whether the client honors the change annotations in
+	// text edits and resource operations returned via the
+	// rename request's workspace edit by for example presenting
+	// the workspace edit in the user interface and asking
+	// for confirmation.
+	//
+	// @since 3.16.0.
+	HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
+}
+
+// PrepareSupportDefaultBehavior default behavior of PrepareSupport.
+//
+// @since 3.16.0.
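+//
+// A minimal sketch (illustrative, not part of the upstream file): a client
+// advertising prepare support with the identifier default behavior sets
+//
+//	caps := RenameClientCapabilities{
+//		PrepareSupport:                true,
+//		PrepareSupportDefaultBehavior: PrepareSupportDefaultBehaviorIdentifier,
+//	}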
+type PrepareSupportDefaultBehavior float64
+
+// list of PrepareSupportDefaultBehavior.
+const (
+	// PrepareSupportDefaultBehaviorIdentifier is the client's default behavior to select the identifier
+	// according to the language's syntax rule.
+	PrepareSupportDefaultBehaviorIdentifier PrepareSupportDefaultBehavior = 1
+)
+
+// String returns a string representation of the PrepareSupportDefaultBehavior.
+func (k PrepareSupportDefaultBehavior) String() string {
+	switch k {
+	case PrepareSupportDefaultBehaviorIdentifier:
+		return "Identifier"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// FoldingRangeClientCapabilities capabilities specific to "textDocument/foldingRange" requests.
+//
+// @since 3.10.0.
+type FoldingRangeClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration for folding range providers. If this is set to `true`
+	// the client supports the new "(FoldingRangeProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)"
+	// return value for the corresponding server capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+	// RangeLimit is the maximum number of folding ranges that the client prefers to receive per document. The value serves as a
+	// hint; servers are free to follow the limit.
+	RangeLimit uint32 `json:"rangeLimit,omitempty"`
+
+	// LineFoldingOnly if set, the client signals that it only supports folding complete lines. If set, the client will
+	// ignore specified "startCharacter" and "endCharacter" properties in a FoldingRange.
+	LineFoldingOnly bool `json:"lineFoldingOnly,omitempty"`
+}
+
+// SelectionRangeClientCapabilities capabilities specific to "textDocument/selectionRange" requests.
+//
+// @since 3.16.0.
+type SelectionRangeClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration for selection range providers. If this is set to `true`
+	// the client supports the new "(SelectionRangeProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)"
+	// return value for the corresponding server capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// CallHierarchyClientCapabilities capabilities specific to "textDocument/callHierarchy" requests.
+//
+// @since 3.16.0.
+type CallHierarchyClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration. If this is set to
+	// `true` the client supports the new `(TextDocumentRegistrationOptions &
+	// StaticRegistrationOptions)` return value for the corresponding server
+	// capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// SemanticTokensClientCapabilities capabilities specific to the "textDocument.semanticTokens" request.
+//
+// @since 3.16.0.
+type SemanticTokensClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration. If this is set to
+	// `true` the client supports the new `(TextDocumentRegistrationOptions &
+	// StaticRegistrationOptions)` return value for the corresponding server
+	// capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+	// Requests which requests the client supports and might send to the server
+	// depending on the server's capability. Please note that clients might not
+	// show semantic tokens or degrade some of the user experience if a range
+	// or full request is advertised by the client but not provided by the
+	// server. If for example the client capability `requests.full` and
+	// `requests.range` are both set to true but the server only provides a
+	// range provider the client might not render a minimap correctly or might
+	// even decide to not show any semantic tokens at all.
+	Requests SemanticTokensWorkspaceClientCapabilitiesRequests `json:"requests"`
+
+	// TokenTypes is the token types that the client supports.
+	TokenTypes []string `json:"tokenTypes"`
+
+	// TokenModifiers is the token modifiers that the client supports.
+	TokenModifiers []string `json:"tokenModifiers"`
+
+	// Formats is the formats the client supports.
+	Formats []TokenFormat `json:"formats"`
+
+	// OverlappingTokenSupport whether the client supports tokens that can overlap each other.
+	OverlappingTokenSupport bool `json:"overlappingTokenSupport,omitempty"`
+
+	// MultilineTokenSupport whether the client supports tokens that can span multiple lines.
+	MultilineTokenSupport bool `json:"multilineTokenSupport,omitempty"`
+}
+
+// SemanticTokensWorkspaceClientCapabilitiesRequests capabilities specific to the "textDocument/semanticTokens/xxx" request.
+//
+// @since 3.16.0.
+type SemanticTokensWorkspaceClientCapabilitiesRequests struct {
+	// Range is the client will send the "textDocument/semanticTokens/range" request
+	// if the server provides a corresponding handler.
+	Range bool `json:"range,omitempty"`
+
+	// Full is the client will send the "textDocument/semanticTokens/full" request
+	// if the server provides a corresponding handler. The client will send the
+	// `textDocument/semanticTokens/full/delta` request if the server provides a
+	// corresponding handler.
+	Full interface{} `json:"full,omitempty"`
+}
+
+// LinkedEditingRangeClientCapabilities capabilities specific to "textDocument/linkedEditingRange" requests.
+//
+// @since 3.16.0.
+type LinkedEditingRangeClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration.
+	// If this is set to `true` the client supports the new
+	// `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+	// return value for the corresponding server capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// MonikerClientCapabilities capabilities specific to the "textDocument/moniker" request.
+//
+// @since 3.16.0.
+type MonikerClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration. If this is set to
+	// `true` the client supports the new `(TextDocumentRegistrationOptions &
+	// StaticRegistrationOptions)` return value for the corresponding server
+	// capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// WindowClientCapabilities represents the window specific client capabilities.
+//
+// @since 3.15.0.
+type WindowClientCapabilities struct {
+	// WorkDoneProgress whether client supports handling progress notifications. If set, servers are allowed to
+	// report in the "workDoneProgress" property in the request specific server capabilities.
+	//
+	// @since 3.15.0.
+	WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
+
+	// ShowMessage capabilities specific to the showMessage request.
+	//
+	// @since 3.16.0.
+	ShowMessage *ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"`
+
+	// ShowDocument client capabilities for the show document request.
+	//
+	// @since 3.16.0.
+	ShowDocument *ShowDocumentClientCapabilities `json:"showDocument,omitempty"`
+}
+
+// ShowMessageRequestClientCapabilities show message request client capabilities.
+//
+// @since 3.16.0.
+type ShowMessageRequestClientCapabilities struct {
+	// MessageActionItem capabilities specific to the "MessageActionItem" type.
+	MessageActionItem *ShowMessageRequestClientCapabilitiesMessageActionItem `json:"messageActionItem,omitempty"`
+}
+
+// ShowMessageRequestClientCapabilitiesMessageActionItem capabilities specific to the "MessageActionItem" type.
+//
+// @since 3.16.0.
+type ShowMessageRequestClientCapabilitiesMessageActionItem struct {
+	// AdditionalPropertiesSupport whether the client supports additional attributes which
+	// are preserved and sent back to the server in the
+	// request's response.
+	AdditionalPropertiesSupport bool `json:"additionalPropertiesSupport,omitempty"`
+}
+
+// ShowDocumentClientCapabilities client capabilities for the show document request.
+//
+// @since 3.16.0.
+type ShowDocumentClientCapabilities struct {
+	// Support is the client has support for the show document
+	// request.
+	Support bool `json:"support"`
+}
+
+// GeneralClientCapabilities represents the general client capabilities.
+//
+// @since 3.16.0.
+type GeneralClientCapabilities struct {
+	// RegularExpressions is the client capabilities specific to regular expressions.
+	//
+	// @since 3.16.0.
+	RegularExpressions *RegularExpressionsClientCapabilities `json:"regularExpressions,omitempty"`
+
+	// Markdown client capabilities specific to the client's markdown parser.
+	//
+	// @since 3.16.0.
+	Markdown *MarkdownClientCapabilities `json:"markdown,omitempty"`
+}
+
+// RegularExpressionsClientCapabilities represents client capabilities specific to regular expressions.
+//
+// The following features from the ECMAScript 2020 regular expression specification are NOT mandatory for a client:
+//
+// Assertions
+// Lookahead assertion, Negative lookahead assertion, lookbehind assertion, negative lookbehind assertion.
+// Character classes
+// Matching control characters using caret notation (e.g. "\cX") and matching UTF-16 code units (e.g. "\uhhhh").
+// Group and ranges
+// Named capturing groups.
+// Unicode property escapes
+// None of the features needs to be supported.
+//
+// The only regular expression flag that a client needs to support is "i" to specify a case insensitive search.
+//
+// @since 3.16.0.
+type RegularExpressionsClientCapabilities struct {
+	// Engine is the engine's name.
+	//
+	// Well known engine name is "ECMAScript".
+	// https://tc39.es/ecma262/#sec-regexp-regular-expression-objects
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
+	Engine string `json:"engine"`
+
+	// Version is the engine's version.
+	//
+	// Well known engine version is "ES2020".
+	// https://tc39.es/ecma262/#sec-regexp-regular-expression-objects
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
+	Version string `json:"version,omitempty"`
+}
+
+// MarkdownClientCapabilities represents client capabilities specific to the used markdown parser.
+//
+// @since 3.16.0.
+type MarkdownClientCapabilities struct {
+	// Parser is the name of the parser.
+	Parser string `json:"parser"`
+
+	// Version is the version of the parser.
+	Version string `json:"version,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/protocol/capabilities_server.go b/vendor/go.lsp.dev/protocol/capabilities_server.go
new file mode 100644
index 00000000000..40ae88a6dfc
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/capabilities_server.go
@@ -0,0 +1,523 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"strconv"
+)
+
+// ServerCapabilities defines the capabilities provided by a language server.
+type ServerCapabilities struct {
+	// TextDocumentSync defines how text documents are synced. Is either a detailed structure defining each notification
+	// or for backwards compatibility the TextDocumentSyncKind number.
+	//
+	// If omitted it defaults to TextDocumentSyncKind.None.
+	TextDocumentSync interface{} `json:"textDocumentSync,omitempty"` // *TextDocumentSyncOptions | TextDocumentSyncKind
+
+	// CompletionProvider is the server provides completion support.
+	CompletionProvider *CompletionOptions `json:"completionProvider,omitempty"`
+
+	// HoverProvider is the server provides hover support.
+	HoverProvider interface{} `json:"hoverProvider,omitempty"` // TODO(zchee): bool | *HoverOptions
+
+	// SignatureHelpProvider is the server provides signature help support.
+	SignatureHelpProvider *SignatureHelpOptions `json:"signatureHelpProvider,omitempty"`
+
+	// DeclarationProvider is the server provides Goto Declaration support.
+	//
+	// @since 3.14.0.
+	DeclarationProvider interface{} `json:"declarationProvider,omitempty"` // TODO(zchee): bool | *DeclarationOptions | *DeclarationRegistrationOptions
+
+	// DefinitionProvider is the server provides Goto definition support.
+	DefinitionProvider interface{} `json:"definitionProvider,omitempty"` // TODO(zchee): bool | *DefinitionOptions
+
+	// TypeDefinitionProvider is the server provides Goto Type Definition support.
+	//
+	// @since 3.6.0.
+	TypeDefinitionProvider interface{} `json:"typeDefinitionProvider,omitempty"` // TODO(zchee): bool | *TypeDefinitionOptions | *TypeDefinitionRegistrationOptions
+
+	// ImplementationProvider is the server provides Goto Implementation support.
+	//
+	// @since 3.6.0.
+	ImplementationProvider interface{} `json:"implementationProvider,omitempty"` // TODO(zchee): bool | *ImplementationOptions | *ImplementationRegistrationOptions
+
+	// ReferencesProvider is the server provides find references support.
+	ReferencesProvider interface{} `json:"referencesProvider,omitempty"` // TODO(zchee): bool | *ReferenceOptions
+
+	// DocumentHighlightProvider is the server provides document highlight support.
+	DocumentHighlightProvider interface{} `json:"documentHighlightProvider,omitempty"` // TODO(zchee): bool | *DocumentHighlightOptions
+
+	// DocumentSymbolProvider is the server provides document symbol support.
+	DocumentSymbolProvider interface{} `json:"documentSymbolProvider,omitempty"` // TODO(zchee): bool | *DocumentSymbolOptions
+
+	// CodeActionProvider is the server provides code actions.
+	//
+	// CodeActionOptions may only be specified if the client states that it supports CodeActionLiteralSupport in its
+	// initial Initialize request.
+	CodeActionProvider interface{} `json:"codeActionProvider,omitempty"` // TODO(zchee): bool | *CodeActionOptions
+
+	// CodeLensProvider is the server provides code lens.
+	CodeLensProvider *CodeLensOptions `json:"codeLensProvider,omitempty"`
+
+	// The server provides document link support.
+	DocumentLinkProvider *DocumentLinkOptions `json:"documentLinkProvider,omitempty"`
+
+	// ColorProvider is the server provides color provider support.
+	//
+	// @since 3.6.0.
+	ColorProvider interface{} `json:"colorProvider,omitempty"` // TODO(zchee): bool | *DocumentColorOptions | *DocumentColorRegistrationOptions
+
+	// WorkspaceSymbolProvider is the server provides workspace symbol support.
+	WorkspaceSymbolProvider interface{} `json:"workspaceSymbolProvider,omitempty"` // TODO(zchee): bool | *WorkspaceSymbolOptions
+
+	// DocumentFormattingProvider is the server provides document formatting.
+	DocumentFormattingProvider interface{} `json:"documentFormattingProvider,omitempty"` // TODO(zchee): bool | *DocumentFormattingOptions
+
+	// DocumentRangeFormattingProvider is the server provides document range formatting.
+	DocumentRangeFormattingProvider interface{} `json:"documentRangeFormattingProvider,omitempty"` // TODO(zchee): bool | *DocumentRangeFormattingOptions
+
+	// DocumentOnTypeFormattingProvider is the server provides document formatting on typing.
+	DocumentOnTypeFormattingProvider *DocumentOnTypeFormattingOptions `json:"documentOnTypeFormattingProvider,omitempty"`
+
+	// RenameProvider is the server provides rename support.
+	//
+	// RenameOptions may only be specified if the client states that it supports PrepareSupport in its
+	// initial Initialize request.
+	RenameProvider interface{} `json:"renameProvider,omitempty"` // TODO(zchee): bool | *RenameOptions
+
+	// FoldingRangeProvider is the server provides folding provider support.
+	//
+	// @since 3.10.0.
+	FoldingRangeProvider interface{} `json:"foldingRangeProvider,omitempty"` // TODO(zchee): bool | *FoldingRangeOptions | *FoldingRangeRegistrationOptions
+
+	// SelectionRangeProvider is the server provides selection range support.
+	//
+	// @since 3.15.0.
+	SelectionRangeProvider interface{} `json:"selectionRangeProvider,omitempty"` // TODO(zchee): bool | *SelectionRangeOptions | *SelectionRangeRegistrationOptions
+
+	// ExecuteCommandProvider is the server provides execute command support.
+	ExecuteCommandProvider *ExecuteCommandOptions `json:"executeCommandProvider,omitempty"`
+
+	// CallHierarchyProvider is the server provides call hierarchy support.
+	//
+	// @since 3.16.0.
+	CallHierarchyProvider interface{} `json:"callHierarchyProvider,omitempty"` // TODO(zchee): bool | *CallHierarchyOptions | *CallHierarchyRegistrationOptions
+
+	// LinkedEditingRangeProvider is the server provides linked editing range support.
+	//
+	// @since 3.16.0.
+	LinkedEditingRangeProvider interface{} `json:"linkedEditingRangeProvider,omitempty"` // TODO(zchee): bool | *LinkedEditingRangeOptions | *LinkedEditingRangeRegistrationOptions
+
+	// SemanticTokensProvider is the server provides semantic tokens support.
+	//
+	// @since 3.16.0.
+	SemanticTokensProvider interface{} `json:"semanticTokensProvider,omitempty"` // TODO(zchee): *SemanticTokensOptions | *SemanticTokensRegistrationOptions
+
+	// Workspace is the workspace specific server capabilities.
+	Workspace *ServerCapabilitiesWorkspace `json:"workspace,omitempty"`
+
+	// MonikerProvider is the server provides moniker support.
+	//
+	// @since 3.16.0.
+	MonikerProvider interface{} `json:"monikerProvider,omitempty"` // TODO(zchee): bool | *MonikerOptions | *MonikerRegistrationOptions
+
+	// Experimental server capabilities.
+	Experimental interface{} `json:"experimental,omitempty"`
+}
+
+// TextDocumentSyncOptions TextDocumentSync options.
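+//
+// A minimal sketch (illustrative, not part of the upstream file) of a server
+// advertising incremental sync through the ServerCapabilities above:
+//
+//	caps := ServerCapabilities{
+//		TextDocumentSync: &TextDocumentSyncOptions{
+//			OpenClose: true,
+//			Change:    TextDocumentSyncKindIncremental,
+//			Save:      &SaveOptions{IncludeText: false},
+//		},
+//	}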
+type TextDocumentSyncOptions struct {
+	// OpenClose open and close notifications are sent to the server.
+	OpenClose bool `json:"openClose,omitempty"`
+
+	// Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full
+	// and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None.
+	Change TextDocumentSyncKind `json:"change,omitempty"`
+
+	// WillSave notifications are sent to the server.
+	WillSave bool `json:"willSave,omitempty"`
+
+	// WillSaveWaitUntil will save wait until requests are sent to the server.
+	WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"`
+
+	// Save notifications are sent to the server.
+	Save *SaveOptions `json:"save,omitempty"`
+}
+
+// SaveOptions save options.
+type SaveOptions struct {
+	// IncludeText is the client is supposed to include the content on save.
+	IncludeText bool `json:"includeText,omitempty"`
+}
+
+// TextDocumentSyncKind defines how the host (editor) should sync document changes to the language server.
+type TextDocumentSyncKind float64
+
+const (
+	// TextDocumentSyncKindNone documents should not be synced at all.
+	TextDocumentSyncKindNone TextDocumentSyncKind = 0
+
+	// TextDocumentSyncKindFull documents are synced by always sending the full content
+	// of the document.
+	TextDocumentSyncKindFull TextDocumentSyncKind = 1
+
+	// TextDocumentSyncKindIncremental documents are synced by sending the full content on open.
+	// After that only incremental updates to the document are
+	// sent.
+	TextDocumentSyncKindIncremental TextDocumentSyncKind = 2
+)
+
+// String implements fmt.Stringer.
+func (k TextDocumentSyncKind) String() string {
+	switch k {
+	case TextDocumentSyncKindNone:
+		return "None"
+	case TextDocumentSyncKindFull:
+		return "Full"
+	case TextDocumentSyncKindIncremental:
+		return "Incremental"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// CompletionOptions Completion options.
+type CompletionOptions struct {
+	// The server provides support to resolve additional
+	// information for a completion item.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+
+	// The characters that trigger completion automatically.
+	TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+}
+
+// HoverOptions option of hover provider server capabilities.
+type HoverOptions struct {
+	WorkDoneProgressOptions
+}
+
+// SignatureHelpOptions SignatureHelp options.
+type SignatureHelpOptions struct {
+	// The characters that trigger signature help
+	// automatically.
+	TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+
+	// RetriggerCharacters is the list of characters that re-trigger signature help.
+	//
+	// These trigger characters are only active when signature help is already
+	// showing.
+	// All trigger characters are also counted as re-trigger characters.
+	//
+	// @since 3.15.0.
+	RetriggerCharacters []string `json:"retriggerCharacters,omitempty"`
+}
+
+// DeclarationOptions registration option of Declaration server capability.
+//
+// @since 3.15.0.
+type DeclarationOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DeclarationRegistrationOptions registration option of Declaration server capability.
+//
+// @since 3.15.0.
+type DeclarationRegistrationOptions struct {
+	DeclarationOptions
+	TextDocumentRegistrationOptions
+	StaticRegistrationOptions
+}
+
+// DefinitionOptions registration option of Definition server capability.
+//
+// @since 3.15.0.
+type DefinitionOptions struct {
+	WorkDoneProgressOptions
+}
+
+// TypeDefinitionOptions registration option of TypeDefinition server capability.
+//
+// @since 3.15.0.
+type TypeDefinitionOptions struct {
+	WorkDoneProgressOptions
+}
+
+// TypeDefinitionRegistrationOptions registration option of TypeDefinition server capability.
+//
+// @since 3.15.0.
+type TypeDefinitionRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	TypeDefinitionOptions
+	StaticRegistrationOptions
+}
+
+// ImplementationOptions registration option of Implementation server capability.
+//
+// @since 3.15.0.
+type ImplementationOptions struct {
+	WorkDoneProgressOptions
+}
+
+// ImplementationRegistrationOptions registration option of Implementation server capability.
+//
+// @since 3.15.0.
+type ImplementationRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	ImplementationOptions
+	StaticRegistrationOptions
+}
+
+// ReferenceOptions registration option of Reference server capability.
+type ReferenceOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentHighlightOptions registration option of DocumentHighlight server capability.
+//
+// @since 3.15.0.
+type DocumentHighlightOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentSymbolOptions registration option of DocumentSymbol server capability.
+//
+// @since 3.15.0.
+type DocumentSymbolOptions struct {
+	WorkDoneProgressOptions
+
+	// Label a human-readable string that is shown when multiple outline trees
+	// are shown for the same document.
+	//
+	// @since 3.16.0.
+	Label string `json:"label,omitempty"`
+}
+
+// CodeActionOptions CodeAction options.
+type CodeActionOptions struct {
+	// CodeActionKinds that this server may return.
+	//
+	// The list of kinds may be generic, such as "CodeActionKind.Refactor", or the server
+	// may list out every specific kind it provides.
+	CodeActionKinds []CodeActionKind `json:"codeActionKinds,omitempty"`
+
+	// ResolveProvider is the server provides support to resolve additional
+	// information for a code action.
+	//
+	// @since 3.16.0.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// CodeLensOptions CodeLens options.
+type CodeLensOptions struct {
+	// Code lens has a resolve provider as well.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// DocumentLinkOptions document link options.
+type DocumentLinkOptions struct {
+	// ResolveProvider document links have a resolve provider as well.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// DocumentColorOptions registration option of DocumentColor server capability.
+//
+// @since 3.15.0.
+type DocumentColorOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentColorRegistrationOptions registration option of DocumentColor server capability.
+//
+// @since 3.15.0.
+type DocumentColorRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	StaticRegistrationOptions
+	DocumentColorOptions
+}
+
+// WorkspaceSymbolOptions registration option of WorkspaceSymbol server capability.
+//
+// @since 3.15.0.
+type WorkspaceSymbolOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentFormattingOptions registration option of DocumentFormatting server capability.
+//
+// @since 3.15.0.
+type DocumentFormattingOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentRangeFormattingOptions registration option of DocumentRangeFormatting server capability.
+//
+// @since 3.15.0.
+type DocumentRangeFormattingOptions struct { + WorkDoneProgressOptions +} + +// DocumentOnTypeFormattingOptions format document on type options. +type DocumentOnTypeFormattingOptions struct { + // FirstTriggerCharacter a character on which formatting should be triggered, like "}". + FirstTriggerCharacter string `json:"firstTriggerCharacter"` + + // MoreTriggerCharacter more trigger characters. + MoreTriggerCharacter []string `json:"moreTriggerCharacter,omitempty"` +} + +// RenameOptions rename options. +type RenameOptions struct { + // PrepareProvider renames should be checked and tested before being executed. + PrepareProvider bool `json:"prepareProvider,omitempty"` +} + +// FoldingRangeOptions registration option of FoldingRange server capability. +// +// @since 3.15.0. +type FoldingRangeOptions struct { + WorkDoneProgressOptions +} + +// FoldingRangeRegistrationOptions registration option of FoldingRange server capability. +// +// @since 3.15.0. +type FoldingRangeRegistrationOptions struct { + TextDocumentRegistrationOptions + FoldingRangeOptions + StaticRegistrationOptions +} + +// ExecuteCommandOptions execute command options. +type ExecuteCommandOptions struct { + // Commands is the commands to be executed on the server + Commands []string `json:"commands"` +} + +// CallHierarchyOptions option of CallHierarchy. +// +// @since 3.16.0. +type CallHierarchyOptions struct { + WorkDoneProgressOptions +} + +// CallHierarchyRegistrationOptions registration options of CallHierarchy. +// +// @since 3.16.0. +type CallHierarchyRegistrationOptions struct { + TextDocumentRegistrationOptions + CallHierarchyOptions + StaticRegistrationOptions +} + +// LinkedEditingRangeOptions option of linked editing range provider server capabilities. +// +// @since 3.16.0. +type LinkedEditingRangeOptions struct { + WorkDoneProgressOptions +} + +// LinkedEditingRangeRegistrationOptions registration option of linked editing range provider server capabilities. +// +// @since 3.16.0. +type LinkedEditingRangeRegistrationOptions struct { + TextDocumentRegistrationOptions + LinkedEditingRangeOptions + StaticRegistrationOptions +} + +// SemanticTokensOptions option of semantic tokens provider server capabilities. +// +// @since 3.16.0. +type SemanticTokensOptions struct { + WorkDoneProgressOptions +} + +// SemanticTokensRegistrationOptions registration option of semantic tokens provider server capabilities. +// +// @since 3.16.0. +type SemanticTokensRegistrationOptions struct { + TextDocumentRegistrationOptions + SemanticTokensOptions + StaticRegistrationOptions +} + +// ServerCapabilitiesWorkspace specific server capabilities. +type ServerCapabilitiesWorkspace struct { + // WorkspaceFolders is the server supports workspace folder. + // + // @since 3.6.0. + WorkspaceFolders *ServerCapabilitiesWorkspaceFolders `json:"workspaceFolders,omitempty"` + + // FileOperations is the server is interested in file notifications/requests. + // + // @since 3.16.0. + FileOperations *ServerCapabilitiesWorkspaceFileOperations `json:"fileOperations,omitempty"` +} + +// ServerCapabilitiesWorkspaceFolders is the server supports workspace folder. +// +// @since 3.6.0. +type ServerCapabilitiesWorkspaceFolders struct { + // Supported is the server has support for workspace folders + Supported bool `json:"supported,omitempty"` + + // ChangeNotifications whether the server wants to receive workspace folder + // change notifications. 
+	//
+	// If a string is provided, the string is treated as an ID
+	// under which the notification is registered on the client
+	// side. The ID can be used to unregister for these events
+	// using the `client/unregisterCapability` request.
+	ChangeNotifications interface{} `json:"changeNotifications,omitempty"` // string | boolean
+}
+
+// ServerCapabilitiesWorkspaceFileOperations is the server is interested in file notifications/requests.
+//
+// @since 3.16.0.
+type ServerCapabilitiesWorkspaceFileOperations struct {
+	// DidCreate is the server is interested in receiving didCreateFiles
+	// notifications.
+	DidCreate *FileOperationRegistrationOptions `json:"didCreate,omitempty"`
+
+	// WillCreate is the server is interested in receiving willCreateFiles requests.
+	WillCreate *FileOperationRegistrationOptions `json:"willCreate,omitempty"`
+
+	// DidRename is the server is interested in receiving didRenameFiles
+	// notifications.
+	DidRename *FileOperationRegistrationOptions `json:"didRename,omitempty"`
+
+	// WillRename is the server is interested in receiving willRenameFiles requests.
+	WillRename *FileOperationRegistrationOptions `json:"willRename,omitempty"`
+
+	// DidDelete is the server is interested in receiving didDeleteFiles file
+	// notifications.
+	DidDelete *FileOperationRegistrationOptions `json:"didDelete,omitempty"`
+
+	// WillDelete is the server is interested in receiving willDeleteFiles file
+	// requests.
+	WillDelete *FileOperationRegistrationOptions `json:"willDelete,omitempty"`
+}
+
+// FileOperationRegistrationOptions is the options to register for file operations.
+//
+// @since 3.16.0.
+type FileOperationRegistrationOptions struct {
+	// Filters is the actual filters.
+	Filters []FileOperationFilter `json:"filters"`
+}
+
+// MonikerOptions option of moniker provider server capabilities.
+//
+// @since 3.16.0.
+type MonikerOptions struct {
+	WorkDoneProgressOptions
+}
+
+// MonikerRegistrationOptions registration option of moniker provider server capabilities.
+//
+// @since 3.16.0.
+type MonikerRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	MonikerOptions
+}
diff --git a/vendor/go.lsp.dev/protocol/client.go b/vendor/go.lsp.dev/protocol/client.go
new file mode 100644
index 00000000000..02109209441
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/client.go
@@ -0,0 +1,412 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+	"go.uber.org/zap"
+
+	"go.lsp.dev/jsonrpc2"
+	"go.lsp.dev/pkg/xcontext"
+)
+
+// ClientDispatcher returns a Client that dispatches LSP requests across the
+// given jsonrpc2 connection.
+func ClientDispatcher(conn jsonrpc2.Conn, logger *zap.Logger) Client {
+	return &client{
+		Conn:   conn,
+		logger: logger,
+	}
+}
+
+// ClientHandler handler of LSP client.
+func ClientHandler(client Client, handler jsonrpc2.Handler) jsonrpc2.Handler {
+	h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+		if ctx.Err() != nil {
+			xctx := xcontext.Detach(ctx)
+
+			return reply(xctx, nil, ErrRequestCancelled)
+		}
+
+		handled, err := clientDispatch(ctx, client, reply, req)
+		if handled || err != nil {
+			return err
+		}
+
+		return handler(ctx, reply, req)
+	}
+
+	return h
+}
+
+// clientDispatch implements jsonrpc2.Handler.
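+//
+// A hedged wiring sketch (illustrative, not part of the upstream file): a
+// server can route client-bound traffic through ClientHandler and fall back
+// to jsonrpc2's method-not-found handler for anything unhandled; "conn" and
+// "myClient" are assumed to exist in the surrounding code:
+//
+//	handler := ClientHandler(myClient, jsonrpc2.MethodNotFoundHandler)
+//	conn.Go(ctx, handler)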
+//nolint:funlen,cyclop
+func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, req jsonrpc2.Request) (handled bool, err error) {
+	if ctx.Err() != nil {
+		return true, reply(ctx, nil, ErrRequestCancelled)
+	}
+
+	dec := json.NewDecoder(bytes.NewReader(req.Params()))
+	logger := LoggerFromContext(ctx)
+
+	switch req.Method() {
+	case MethodProgress: // notification
+		defer logger.Debug(MethodProgress, zap.Error(err))
+
+		var params ProgressParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.Progress(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkDoneProgressCreate: // request
+		defer logger.Debug(MethodWorkDoneProgressCreate, zap.Error(err))
+
+		var params WorkDoneProgressCreateParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.WorkDoneProgressCreate(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWindowLogMessage: // notification
+		defer logger.Debug(MethodWindowLogMessage, zap.Error(err))
+
+		var params LogMessageParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.LogMessage(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentPublishDiagnostics: // notification
+		defer logger.Debug(MethodTextDocumentPublishDiagnostics, zap.Error(err))
+
+		var params PublishDiagnosticsParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.PublishDiagnostics(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWindowShowMessage: // notification
+		defer logger.Debug(MethodWindowShowMessage, zap.Error(err))
+
+		var params ShowMessageParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.ShowMessage(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWindowShowMessageRequest: // request
+		defer logger.Debug(MethodWindowShowMessageRequest, zap.Error(err))
+
+		var params ShowMessageRequestParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := client.ShowMessageRequest(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTelemetryEvent: // notification
+		defer logger.Debug(MethodTelemetryEvent, zap.Error(err))
+
+		var params interface{}
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.Telemetry(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodClientRegisterCapability: // request
+		defer logger.Debug(MethodClientRegisterCapability, zap.Error(err))
+
+		var params RegistrationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.RegisterCapability(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodClientUnregisterCapability: // request
+		defer logger.Debug(MethodClientUnregisterCapability, zap.Error(err))
+
+		var params UnregistrationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.UnregisterCapability(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkspaceApplyEdit: // request
+		defer logger.Debug(MethodWorkspaceApplyEdit, zap.Error(err))
+
+		var params ApplyWorkspaceEditParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := client.ApplyEdit(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodWorkspaceConfiguration: // request
+		defer logger.Debug(MethodWorkspaceConfiguration, zap.Error(err))
+
+		var params ConfigurationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := client.Configuration(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodWorkspaceWorkspaceFolders: // request
+		defer logger.Debug(MethodWorkspaceWorkspaceFolders, zap.Error(err))
+
+		if len(req.Params()) > 0 {
+			return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+		}
+
+		resp, err := client.WorkspaceFolders(ctx)
+
+		return true, reply(ctx, resp, err)
+
+	default:
+		return false, nil
+	}
+}
+
+// Client represents a Language Server Protocol client.
+type Client interface {
+	Progress(ctx context.Context, params *ProgressParams) (err error)
+	WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) (err error)
+	LogMessage(ctx context.Context, params *LogMessageParams) (err error)
+	PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) (err error)
+	ShowMessage(ctx context.Context, params *ShowMessageParams) (err error)
+	ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (result *MessageActionItem, err error)
+	Telemetry(ctx context.Context, params interface{}) (err error)
+	RegisterCapability(ctx context.Context, params *RegistrationParams) (err error)
+	UnregisterCapability(ctx context.Context, params *UnregistrationParams) (err error)
+	ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (result bool, err error)
+	Configuration(ctx context.Context, params *ConfigurationParams) (result []interface{}, err error)
+	WorkspaceFolders(ctx context.Context) (result []WorkspaceFolder, err error)
+}
+
+// list of client methods.
+const (
+	// MethodProgress method name of "$/progress".
+	MethodProgress = "$/progress"
+
+	// MethodWorkDoneProgressCreate method name of "window/workDoneProgress/create".
+	MethodWorkDoneProgressCreate = "window/workDoneProgress/create"
+
+	// MethodWindowShowMessage method name of "window/showMessage".
+	MethodWindowShowMessage = "window/showMessage"
+
+	// MethodWindowShowMessageRequest method name of "window/showMessageRequest".
+	MethodWindowShowMessageRequest = "window/showMessageRequest"
+
+	// MethodWindowLogMessage method name of "window/logMessage".
+	MethodWindowLogMessage = "window/logMessage"
+
+	// MethodTelemetryEvent method name of "telemetry/event".
+	MethodTelemetryEvent = "telemetry/event"
+
+	// MethodClientRegisterCapability method name of "client/registerCapability".
+	MethodClientRegisterCapability = "client/registerCapability"
+
+	// MethodClientUnregisterCapability method name of "client/unregisterCapability".
+	MethodClientUnregisterCapability = "client/unregisterCapability"
+
+	// MethodTextDocumentPublishDiagnostics method name of "textDocument/publishDiagnostics".
+	MethodTextDocumentPublishDiagnostics = "textDocument/publishDiagnostics"
+
+	// MethodWorkspaceApplyEdit method name of "workspace/applyEdit".
+	MethodWorkspaceApplyEdit = "workspace/applyEdit"
+
+	// MethodWorkspaceConfiguration method name of "workspace/configuration".
+	MethodWorkspaceConfiguration = "workspace/configuration"
+
+	// MethodWorkspaceWorkspaceFolders method name of "workspace/workspaceFolders".
+	MethodWorkspaceWorkspaceFolders = "workspace/workspaceFolders"
+)
+
+// client implements a Language Server Protocol client.
+type client struct {
+	jsonrpc2.Conn
+
+	logger *zap.Logger
+}
+
+// compile-time check whether the client implements the Client interface.
+var _ Client = (*client)(nil)
+
+// Progress is the base protocol's generic mechanism for reporting progress.
+//
+// This mechanism can be used to report any kind of progress including work done progress (usually used to report progress in the user interface using a progress bar) and
+// partial result progress to support streaming of results.
+//
+// @since 3.16.0.
+func (c *client) Progress(ctx context.Context, params *ProgressParams) (err error) {
+	c.logger.Debug("call " + MethodProgress)
+	defer c.logger.Debug("end "+MethodProgress, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodProgress, params)
+}
+
+// WorkDoneProgressCreate sends the request from the server to the client to ask the client to create a work done progress.
+//
+// @since 3.16.0.
+func (c *client) WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) (err error) {
+	c.logger.Debug("call " + MethodWorkDoneProgressCreate)
+	defer c.logger.Debug("end "+MethodWorkDoneProgressCreate, zap.Error(err))
+
+	return Call(ctx, c.Conn, MethodWorkDoneProgressCreate, params, nil)
+}
+
+// LogMessage sends the notification from the server to the client to ask the client to log a particular message.
+func (c *client) LogMessage(ctx context.Context, params *LogMessageParams) (err error) {
+	c.logger.Debug("call " + MethodWindowLogMessage)
+	defer c.logger.Debug("end "+MethodWindowLogMessage, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodWindowLogMessage, params)
+}
+
+// PublishDiagnostics sends the notification from the server to the client to signal results of validation runs.
+//
+// Diagnostics are "owned" by the server so it is the server's responsibility to clear them if necessary. The following rule is used for VS Code servers that generate diagnostics:
+//
+// - if a language is single file only (for example HTML) then diagnostics are cleared by the server when the file is closed.
+// - if a language has a project system (for example C#) diagnostics are not cleared when a file closes. When a project is opened all diagnostics for all files are recomputed (or read from a cache).
+//
+// When a file changes it is the server's responsibility to re-compute diagnostics and push them to the client.
+// If the computed set is empty it has to push the empty array to clear former diagnostics.
+// Newly pushed diagnostics always replace previously pushed diagnostics. There is no merging that happens on the client side.
+func (c *client) PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) (err error) {
+	c.logger.Debug("call " + MethodTextDocumentPublishDiagnostics)
+	defer c.logger.Debug("end "+MethodTextDocumentPublishDiagnostics, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodTextDocumentPublishDiagnostics, params)
+}
+
+// ShowMessage sends the notification from a server to a client to ask the
+// client to display a particular message in the user interface.
+func (c *client) ShowMessage(ctx context.Context, params *ShowMessageParams) (err error) {
+	return c.Conn.Notify(ctx, MethodWindowShowMessage, params)
+}
+
+// ShowMessageRequest sends the request from a server to a client to ask the client to display a particular message in the user interface.
+//
+// In addition to the show message notification the request allows the server to pass actions and to wait for an answer from the client.
+func (c *client) ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (_ *MessageActionItem, err error) {
+	c.logger.Debug("call " + MethodWindowShowMessageRequest)
+	defer c.logger.Debug("end "+MethodWindowShowMessageRequest, zap.Error(err))
+
+	var result *MessageActionItem
+	if err := Call(ctx, c.Conn, MethodWindowShowMessageRequest, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Telemetry sends the notification from the server to the client to ask the client to log a telemetry event.
+func (c *client) Telemetry(ctx context.Context, params interface{}) (err error) {
+	c.logger.Debug("call " + MethodTelemetryEvent)
+	defer c.logger.Debug("end "+MethodTelemetryEvent, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodTelemetryEvent, params)
+}
+
+// RegisterCapability sends the request from the server to the client to register for a new capability on the client side.
+//
+// Not all clients need to support dynamic capability registration.
+//
+// A client opts in via the dynamicRegistration property on the specific client capabilities.
+// A client can even provide dynamic registration for capability A but not for capability B (see TextDocumentClientCapabilities as an example).
+func (c *client) RegisterCapability(ctx context.Context, params *RegistrationParams) (err error) {
+	c.logger.Debug("call " + MethodClientRegisterCapability)
+	defer c.logger.Debug("end "+MethodClientRegisterCapability, zap.Error(err))
+
+	return Call(ctx, c.Conn, MethodClientRegisterCapability, params, nil)
+}
+
+// UnregisterCapability sends the request from the server to the client to unregister a previously registered capability.
+func (c *client) UnregisterCapability(ctx context.Context, params *UnregistrationParams) (err error) {
+	c.logger.Debug("call " + MethodClientUnregisterCapability)
+	defer c.logger.Debug("end "+MethodClientUnregisterCapability, zap.Error(err))
+
+	return Call(ctx, c.Conn, MethodClientUnregisterCapability, params, nil)
+}
+
+// ApplyEdit sends the request from the server to the client to modify a resource on the client side.
+func (c *client) ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (result bool, err error) {
+	c.logger.Debug("call " + MethodWorkspaceApplyEdit)
+	defer c.logger.Debug("end "+MethodWorkspaceApplyEdit, zap.Error(err))
+
+	if err := Call(ctx, c.Conn, MethodWorkspaceApplyEdit, params, &result); err != nil {
+		return false, err
+	}
+
+	return result, nil
+}
+
+// Configuration sends the request from the server to the client to fetch configuration settings from the client.
+//
+// The request can fetch several configuration settings in one roundtrip.
+// The order of the returned configuration settings corresponds to the order of the
+// passed ConfigurationItems (e.g. the first item in the response is the result for the first configuration item in the params).
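+//
+// Usage sketch (illustrative; ConfigurationParams/ConfigurationItem are the
+// LSP structures defined elsewhere in this package, and the "gopls" section
+// name is only an example):
+//
+//	settings, err := c.Configuration(ctx, &ConfigurationParams{
+//		Items: []ConfigurationItem{{Section: "gopls"}},
+//	})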
+func (c *client) Configuration(ctx context.Context, params *ConfigurationParams) (_ []interface{}, err error) { + c.logger.Debug("call " + MethodWorkspaceConfiguration) + defer c.logger.Debug("end "+MethodWorkspaceConfiguration, zap.Error(err)) + + var result []interface{} + if err := Call(ctx, c.Conn, MethodWorkspaceConfiguration, params, &result); err != nil { + return nil, err + } + + return result, nil +} + +// WorkspaceFolders sends the request from the server to the client to fetch the current open list of workspace folders. +// +// Returns null in the response if only a single file is open in the tool. Returns an empty array if a workspace is open but no folders are configured. +// +// @since 3.6.0. +func (c *client) WorkspaceFolders(ctx context.Context) (result []WorkspaceFolder, err error) { + c.logger.Debug("call " + MethodWorkspaceWorkspaceFolders) + defer c.logger.Debug("end "+MethodWorkspaceWorkspaceFolders, zap.Error(err)) + + if err := Call(ctx, c.Conn, MethodWorkspaceWorkspaceFolders, nil, &result); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/go.lsp.dev/protocol/context.go b/vendor/go.lsp.dev/protocol/context.go new file mode 100644 index 00000000000..d12bcd800d3 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/context.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2020 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "context" + + "go.uber.org/zap" +) + +var ( + ctxLogger struct{} + ctxClient struct{} +) + +// WithLogger returns the context with zap.Logger value. +func WithLogger(ctx context.Context, logger *zap.Logger) context.Context { + return context.WithValue(ctx, ctxLogger, logger) +} + +// LoggerFromContext extracts zap.Logger from context. +func LoggerFromContext(ctx context.Context) *zap.Logger { + logger, ok := ctx.Value(ctxLogger).(*zap.Logger) + if !ok { + return zap.NewNop() + } + + return logger +} + +// WithClient returns the context with Client value. +func WithClient(ctx context.Context, client Client) context.Context { + return context.WithValue(ctx, ctxClient, client) +} diff --git a/vendor/go.lsp.dev/protocol/deprecated.go b/vendor/go.lsp.dev/protocol/deprecated.go new file mode 100644 index 00000000000..fa4b2160939 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/deprecated.go @@ -0,0 +1,264 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +// ClientCapabilitiesShowDocument alias of ShowDocumentClientCapabilities. +// +// Deprecated: Use ShowDocumentClientCapabilities instead. +type ClientCapabilitiesShowDocument = ShowDocumentClientCapabilities + +// ClientCapabilitiesShowMessageRequest alias of ShowMessageRequestClientCapabilities. +// +// Deprecated: Use ShowMessageRequestClientCapabilities instead. +type ClientCapabilitiesShowMessageRequest = ShowMessageRequestClientCapabilities + +// ClientCapabilitiesShowMessageRequestMessageActionItem alias of ShowMessageRequestClientCapabilitiesMessageActionItem. +// +// Deprecated: Use ShowMessageRequestClientCapabilitiesMessageActionItem instead. +type ClientCapabilitiesShowMessageRequestMessageActionItem = ShowMessageRequestClientCapabilitiesMessageActionItem + +// ReferencesParams alias of ReferenceParams. +// +// Deprecated: Use ReferenceParams instead. +type ReferencesParams = ReferenceParams + +// TextDocumentClientCapabilitiesCallHierarchy alias of CallHierarchyClientCapabilities. 
+// +// Deprecated: Use CallHierarchyClientCapabilities instead. +type TextDocumentClientCapabilitiesCallHierarchy = CallHierarchyClientCapabilities + +// TextDocumentClientCapabilitiesCodeAction alias of CodeActionClientCapabilities. +// +// Deprecated: Use CodeActionClientCapabilities instead. +type TextDocumentClientCapabilitiesCodeAction = CodeActionClientCapabilities + +// TextDocumentClientCapabilitiesCodeActionKind alias of CodeActionClientCapabilitiesKind. +// +// Deprecated: Use CodeActionClientCapabilitiesKind instead. +type TextDocumentClientCapabilitiesCodeActionKind = CodeActionClientCapabilitiesKind + +// TextDocumentClientCapabilitiesCodeActionLiteralSupport alias of CodeActionClientCapabilitiesLiteralSupport. +// +// Deprecated: Use CodeActionClientCapabilitiesLiteralSupport instead. +type TextDocumentClientCapabilitiesCodeActionLiteralSupport = CodeActionClientCapabilitiesLiteralSupport + +// TextDocumentClientCapabilitiesCodeActionResolveSupport alias of CodeActionClientCapabilitiesResolveSupport. +// +// Deprecated: Use CodeActionClientCapabilitiesResolveSupport instead. +type TextDocumentClientCapabilitiesCodeActionResolveSupport = CodeActionClientCapabilitiesResolveSupport + +// TextDocumentClientCapabilitiesCodeLens alias of CodeLensClientCapabilities. +// +// Deprecated: Use CodeLensClientCapabilities instead. +type TextDocumentClientCapabilitiesCodeLens = CodeLensClientCapabilities + +// TextDocumentClientCapabilitiesColorProvider alias of DocumentColorClientCapabilities. +// +// Deprecated: Use DocumentColorClientCapabilities instead. +type TextDocumentClientCapabilitiesColorProvider = DocumentColorClientCapabilities + +// TextDocumentClientCapabilitiesCompletion alias of CompletionTextDocumentClientCapabilities. +// +// Deprecated: Use CompletionTextDocumentClientCapabilities instead. +type TextDocumentClientCapabilitiesCompletion = CompletionTextDocumentClientCapabilities + +// TextDocumentClientCapabilitiesCompletionItem alias of CompletionTextDocumentClientCapabilitiesItem. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItem instead. +type TextDocumentClientCapabilitiesCompletionItem = CompletionTextDocumentClientCapabilitiesItem + +// TextDocumentClientCapabilitiesCompletionItemInsertTextModeSupport alias of CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport instead. +type TextDocumentClientCapabilitiesCompletionItemInsertTextModeSupport = CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport + +// TextDocumentClientCapabilitiesCompletionItemKind alias of CompletionTextDocumentClientCapabilitiesItemKind. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemKind instead. +type TextDocumentClientCapabilitiesCompletionItemKind = CompletionTextDocumentClientCapabilitiesItemKind + +// TextDocumentClientCapabilitiesCompletionItemResolveSupport alias of CompletionTextDocumentClientCapabilitiesItemResolveSupport. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemResolveSupport instead. +type TextDocumentClientCapabilitiesCompletionItemResolveSupport = CompletionTextDocumentClientCapabilitiesItemResolveSupport + +// TextDocumentClientCapabilitiesCompletionItemTagSupport alias of CompletionTextDocumentClientCapabilitiesItemTagSupport. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemTagSupport instead. 
+type TextDocumentClientCapabilitiesCompletionItemTagSupport = CompletionTextDocumentClientCapabilitiesItemTagSupport
+
+// TextDocumentClientCapabilitiesDeclaration alias of DeclarationTextDocumentClientCapabilities.
+//
+// Deprecated: Use DeclarationTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesDeclaration = DeclarationTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesDefinition alias of DefinitionTextDocumentClientCapabilities.
+//
+// Deprecated: Use DefinitionTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesDefinition = DefinitionTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentHighlight alias of DocumentHighlightClientCapabilities.
+//
+// Deprecated: Use DocumentHighlightClientCapabilities instead.
+type TextDocumentClientCapabilitiesDocumentHighlight = DocumentHighlightClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentLink alias of DocumentLinkClientCapabilities.
+//
+// Deprecated: Use DocumentLinkClientCapabilities instead.
+type TextDocumentClientCapabilitiesDocumentLink = DocumentLinkClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentSymbol alias of DocumentSymbolClientCapabilities.
+//
+// Deprecated: Use DocumentSymbolClientCapabilities instead.
+type TextDocumentClientCapabilitiesDocumentSymbol = DocumentSymbolClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentSymbolTagSupport alias of DocumentSymbolClientCapabilitiesTagSupport.
+//
+// Deprecated: Use DocumentSymbolClientCapabilitiesTagSupport instead.
+type TextDocumentClientCapabilitiesDocumentSymbolTagSupport = DocumentSymbolClientCapabilitiesTagSupport
+
+// TextDocumentClientCapabilitiesFoldingRange alias of FoldingRangeClientCapabilities.
+//
+// Deprecated: Use FoldingRangeClientCapabilities instead.
+type TextDocumentClientCapabilitiesFoldingRange = FoldingRangeClientCapabilities
+
+// TextDocumentClientCapabilitiesFormatting alias of DocumentFormattingClientCapabilities.
+//
+// Deprecated: Use DocumentFormattingClientCapabilities instead.
+type TextDocumentClientCapabilitiesFormatting = DocumentFormattingClientCapabilities
+
+// TextDocumentClientCapabilitiesHover alias of HoverTextDocumentClientCapabilities.
+//
+// Deprecated: Use HoverTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesHover = HoverTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesImplementation alias of ImplementationTextDocumentClientCapabilities.
+//
+// Deprecated: Use ImplementationTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesImplementation = ImplementationTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesLinkedEditingRange alias of LinkedEditingRangeClientCapabilities.
+//
+// Deprecated: Use LinkedEditingRangeClientCapabilities instead.
+type TextDocumentClientCapabilitiesLinkedEditingRange = LinkedEditingRangeClientCapabilities
+
+// TextDocumentClientCapabilitiesMoniker alias of MonikerClientCapabilities.
+//
+// Deprecated: Use MonikerClientCapabilities instead.
+type TextDocumentClientCapabilitiesMoniker = MonikerClientCapabilities
+
+// TextDocumentClientCapabilitiesOnTypeFormatting alias of DocumentOnTypeFormattingClientCapabilities.
+//
+// Deprecated: Use DocumentOnTypeFormattingClientCapabilities instead.
+type TextDocumentClientCapabilitiesOnTypeFormatting = DocumentOnTypeFormattingClientCapabilities
+
+// TextDocumentClientCapabilitiesPublishDiagnostics alias of PublishDiagnosticsClientCapabilities.
+//
+// Deprecated: Use PublishDiagnosticsClientCapabilities instead.
+type TextDocumentClientCapabilitiesPublishDiagnostics = PublishDiagnosticsClientCapabilities
+
+// TextDocumentClientCapabilitiesPublishDiagnosticsTagSupport alias of PublishDiagnosticsClientCapabilitiesTagSupport.
+//
+// Deprecated: Use PublishDiagnosticsClientCapabilitiesTagSupport instead.
+type TextDocumentClientCapabilitiesPublishDiagnosticsTagSupport = PublishDiagnosticsClientCapabilitiesTagSupport
+
+// TextDocumentClientCapabilitiesRangeFormatting alias of DocumentRangeFormattingClientCapabilities.
+//
+// Deprecated: Use DocumentRangeFormattingClientCapabilities instead.
+type TextDocumentClientCapabilitiesRangeFormatting = DocumentRangeFormattingClientCapabilities
+
+// TextDocumentClientCapabilitiesReferences alias of ReferencesTextDocumentClientCapabilities.
+//
+// Deprecated: Use ReferencesTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesReferences = ReferencesTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesRename alias of RenameClientCapabilities.
+//
+// Deprecated: Use RenameClientCapabilities instead.
+type TextDocumentClientCapabilitiesRename = RenameClientCapabilities
+
+// TextDocumentClientCapabilitiesSelectionRange alias of SelectionRangeClientCapabilities.
+//
+// Deprecated: Use SelectionRangeClientCapabilities instead.
+type TextDocumentClientCapabilitiesSelectionRange = SelectionRangeClientCapabilities
+
+// TextDocumentClientCapabilitiesSemanticTokens alias of SemanticTokensClientCapabilities.
+//
+// Deprecated: Use SemanticTokensClientCapabilities instead.
+type TextDocumentClientCapabilitiesSemanticTokens = SemanticTokensClientCapabilities
+
+// TextDocumentClientCapabilitiesSignatureHelp alias of SignatureHelpTextDocumentClientCapabilities.
+//
+// Deprecated: Use SignatureHelpTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesSignatureHelp = SignatureHelpTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesSynchronization alias of TextDocumentSyncClientCapabilities.
+//
+// Deprecated: Use TextDocumentSyncClientCapabilities instead.
+type TextDocumentClientCapabilitiesSynchronization = TextDocumentSyncClientCapabilities
+
+// TextDocumentClientCapabilitiesTypeDefinition alias of TypeDefinitionTextDocumentClientCapabilities.
+//
+// Deprecated: Use TypeDefinitionTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesTypeDefinition = TypeDefinitionTextDocumentClientCapabilities
+
+// Abort alias of FailureHandlingKindAbort.
+//
+// Deprecated: Use FailureHandlingKindAbort instead.
+const Abort = FailureHandlingKindAbort
+
+// TextOnlyTransactional alias of FailureHandlingKindTextOnlyTransactional.
+//
+// Deprecated: Use FailureHandlingKindTextOnlyTransactional instead.
+const TextOnlyTransactional = FailureHandlingKindTextOnlyTransactional
+
+// Transactional alias of FailureHandlingKindTransactional.
+//
+// Deprecated: Use FailureHandlingKindTransactional instead.
+const Transactional = FailureHandlingKindTransactional
+
+// Undo alias of FailureHandlingKindUndo.
+//
+// Deprecated: Use FailureHandlingKindUndo instead.
+const Undo = FailureHandlingKindUndo
+
+// WorkspaceClientCapabilitiesSymbol alias of WorkspaceSymbolClientCapabilities.
+//
+// Deprecated: Use WorkspaceSymbolClientCapabilities instead.
+type WorkspaceClientCapabilitiesSymbol = WorkspaceSymbolClientCapabilities
+
+// WorkspaceClientCapabilitiesSymbolKind alias of SymbolKindCapabilities.
+//
+// Deprecated: Use SymbolKindCapabilities instead.
+type WorkspaceClientCapabilitiesSymbolKind = SymbolKindCapabilities + +// WorkspaceClientCapabilitiesCodeLens alias of CodeLensWorkspaceClientCapabilities. +// +// Deprecated: Use CodeLensWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesCodeLens = CodeLensWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesDidChangeConfiguration alias of DidChangeConfigurationWorkspaceClientCapabilities. +// +// Deprecated: Use DidChangeConfigurationWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesDidChangeConfiguration = DidChangeConfigurationWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesDidChangeWatchedFiles alias of DidChangeWatchedFilesWorkspaceClientCapabilities. +// +// Deprecated: Use DidChangeWatchedFilesWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesDidChangeWatchedFiles = DidChangeWatchedFilesWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesExecuteCommand alias of ExecuteCommandClientCapabilities. +// +// Deprecated: Use ExecuteCommandClientCapabilities instead. +type WorkspaceClientCapabilitiesExecuteCommand = ExecuteCommandClientCapabilities + +// WorkspaceClientCapabilitiesSemanticTokens alias of SemanticTokensWorkspaceClientCapabilities. +// +// Deprecated: Use SemanticTokensWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesSemanticTokens = SemanticTokensWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesSemanticTokensRequests alias of SemanticTokensWorkspaceClientCapabilitiesRequests. +// +// Deprecated: Use SemanticTokensWorkspaceClientCapabilitiesRequests instead. +type WorkspaceClientCapabilitiesSemanticTokensRequests = SemanticTokensWorkspaceClientCapabilitiesRequests diff --git a/vendor/go.lsp.dev/protocol/diagnostics.go b/vendor/go.lsp.dev/protocol/diagnostics.go new file mode 100644 index 00000000000..6097f466e42 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/diagnostics.go @@ -0,0 +1,149 @@ +// SPDX-FileCopyrightText: 2019 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "strconv" +) + +// Diagnostic represents a diagnostic, such as a compiler error or warning. +// +// Diagnostic objects are only valid in the scope of a resource. +type Diagnostic struct { + // Range is the range at which the message applies. + Range Range `json:"range"` + + // Severity is the diagnostic's severity. Can be omitted. If omitted it is up to the + // client to interpret diagnostics as error, warning, info or hint. + Severity DiagnosticSeverity `json:"severity,omitempty"` + + // Code is the diagnostic's code, which might appear in the user interface. + Code interface{} `json:"code,omitempty"` // int32 | string; + + // CodeDescription an optional property to describe the error code. + // + // @since 3.16.0. + CodeDescription *CodeDescription `json:"codeDescription,omitempty"` + + // Source a human-readable string describing the source of this + // diagnostic, e.g. 'typescript' or 'super lint'. + Source string `json:"source,omitempty"` + + // Message is the diagnostic's message. + Message string `json:"message"` + + // Tags is the additional metadata about the diagnostic. + // + // @since 3.15.0. + Tags []DiagnosticTag `json:"tags,omitempty"` + + // RelatedInformation an array of related diagnostic information, e.g. when symbol-names within + // a scope collide all definitions can be marked via this property. 
+	RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
+
+	// Data is a data entry field that is preserved between a
+	// "textDocument/publishDiagnostics" notification and
+	// "textDocument/codeAction" request.
+	//
+	// @since 3.16.0.
+	Data interface{} `json:"data,omitempty"`
+}
+
+// DiagnosticSeverity indicates the severity of a Diagnostic message.
+type DiagnosticSeverity float64
+
+const (
+	// DiagnosticSeverityError reports an error.
+	DiagnosticSeverityError DiagnosticSeverity = 1
+
+	// DiagnosticSeverityWarning reports a warning.
+	DiagnosticSeverityWarning DiagnosticSeverity = 2
+
+	// DiagnosticSeverityInformation reports information.
+	DiagnosticSeverityInformation DiagnosticSeverity = 3
+
+	// DiagnosticSeverityHint reports a hint.
+	DiagnosticSeverityHint DiagnosticSeverity = 4
+)
+
+// String implements fmt.Stringer.
+func (d DiagnosticSeverity) String() string {
+	switch d {
+	case DiagnosticSeverityError:
+		return "Error"
+	case DiagnosticSeverityWarning:
+		return "Warning"
+	case DiagnosticSeverityInformation:
+		return "Information"
+	case DiagnosticSeverityHint:
+		return "Hint"
+	default:
+		return strconv.FormatFloat(float64(d), 'f', -10, 64)
+	}
+}
+
+// CodeDescription is the structure to capture a description for an error code.
+//
+// @since 3.16.0.
+type CodeDescription struct {
+	// Href a URI to open with more information about the diagnostic error.
+	Href URI `json:"href"`
+}
+
+// DiagnosticTag is the diagnostic tags.
+//
+// @since 3.15.0.
+type DiagnosticTag float64
+
+// list of DiagnosticTag.
+const (
+	// DiagnosticTagUnnecessary unused or unnecessary code.
+	//
+	// Clients are allowed to render diagnostics with this tag faded out instead of having
+	// an error squiggle.
+	DiagnosticTagUnnecessary DiagnosticTag = 1
+
+	// DiagnosticTagDeprecated deprecated or obsolete code.
+	//
+	// Clients are allowed to render diagnostics with this tag struck through.
+	DiagnosticTagDeprecated DiagnosticTag = 2
+)
+
+// String implements fmt.Stringer.
+func (d DiagnosticTag) String() string {
+	switch d {
+	case DiagnosticTagUnnecessary:
+		return "Unnecessary"
+	case DiagnosticTagDeprecated:
+		return "Deprecated"
+	default:
+		return strconv.FormatFloat(float64(d), 'f', -10, 64)
+	}
+}
+
+// DiagnosticRelatedInformation represents a related message and source code location for a diagnostic.
+//
+// This should be used to point to code locations that cause or are related to a diagnostic, e.g. when duplicating
+// a symbol in a scope.
+type DiagnosticRelatedInformation struct {
+	// Location is the location of this related diagnostic information.
+	Location Location `json:"location"`
+
+	// Message is the message of this related diagnostic information.
+	Message string `json:"message"`
+}
+
+// PublishDiagnosticsParams represents a params of PublishDiagnostics notification.
+type PublishDiagnosticsParams struct {
+	// URI is the URI for which diagnostic information is reported.
+	URI DocumentURI `json:"uri"`
+
+	// Version is the optional version number of the document the diagnostics are published for.
+	//
+	// @since 3.15
+	Version uint32 `json:"version,omitempty"`
+
+	// Diagnostics an array of diagnostic information items.
+	Diagnostics []Diagnostic `json:"diagnostics"`
+}
diff --git a/vendor/go.lsp.dev/protocol/doc.go b/vendor/go.lsp.dev/protocol/doc.go
new file mode 100644
index 00000000000..487378392c3
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/doc.go
@@ -0,0 +1,23 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package protocol implements the Language Server Protocol specification in Go.
+//
+// This package contains the structs that map directly to the wire format
+// of the Language Server Protocol.
+//
+// It is a literal transcription, with unmodified comments, and only the changes
+// required to make it Go code.
+//
+// - Names are uppercased to export them.
+//
+// - All fields have JSON tags added to correct the names.
+//
+// - Fields marked with a ? are also marked as "omitempty".
+//
+// - Fields that are "|| null" are made pointers.
+//
+// - Fields that are string or number are left as string.
+//
+// - Fields that are type "number" are made float64.
+package protocol // import "go.lsp.dev/protocol"
diff --git a/vendor/go.lsp.dev/protocol/errors.go b/vendor/go.lsp.dev/protocol/errors.go
new file mode 100644
index 00000000000..56645f687ee
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/errors.go
@@ -0,0 +1,40 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import "go.lsp.dev/jsonrpc2"
+
+const (
+	// LSPReservedErrorRangeStart is the start range of LSP reserved error codes.
+	//
+	// It doesn't denote a real error code.
+	//
+	// @since 3.16.0.
+	LSPReservedErrorRangeStart jsonrpc2.Code = -32899
+
+	// CodeContentModified is the state change that invalidates the result of a request in execution.
+	//
+	// Defined by the protocol.
+	CodeContentModified jsonrpc2.Code = -32801
+
+	// CodeRequestCancelled is the cancellation error.
+	//
+	// Defined by the protocol.
+	CodeRequestCancelled jsonrpc2.Code = -32800
+
+	// LSPReservedErrorRangeEnd is the end range of LSP reserved error codes.
+	//
+	// It doesn't denote a real error code.
+	//
+	// @since 3.16.0.
+	LSPReservedErrorRangeEnd jsonrpc2.Code = -32800
+)
+
+var (
+	// ErrContentModified should be used when the result of a request is invalidated by a change to the content it depends on.
+	ErrContentModified = jsonrpc2.NewError(CodeContentModified, "cancelled JSON-RPC")
+
+	// ErrRequestCancelled should be used when a request is canceled early.
+	ErrRequestCancelled = jsonrpc2.NewError(CodeRequestCancelled, "cancelled JSON-RPC")
+)
diff --git a/vendor/go.lsp.dev/protocol/general.go b/vendor/go.lsp.dev/protocol/general.go
new file mode 100644
index 00000000000..1693b0cccb0
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/general.go
@@ -0,0 +1,461 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// TraceValue represents an InitializeParams Trace mode.
+type TraceValue string
+
+// list of TraceValue.
+const (
+	// TraceOff disable tracing.
+	TraceOff TraceValue = "off"
+
+	// TraceMessage normal tracing mode.
+	TraceMessage TraceValue = "message"
+
+	// TraceVerbose verbose tracing mode.
+	TraceVerbose TraceValue = "verbose"
+)
+
+// ClientInfo information about the client.
+//
+// @since 3.15.0.
+type ClientInfo struct {
+	// Name is the name of the client as defined by the client.
+	Name string `json:"name"`
+
+	// Version is the client's version as defined by the client.
+	Version string `json:"version,omitempty"`
+}
+
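The diagnostics types above are plain data structs that compose directly. A rough sketch of how a server might assemble a publish-diagnostics payload (assuming the package's Position and DocumentURI types, which this vendored module defines elsewhere):

```go
package main

import (
	"fmt"

	"go.lsp.dev/protocol"
)

func main() {
	// A warning carrying the Deprecated tag, spanning the first eight
	// characters of line 10 (LSP positions are zero-based).
	diag := protocol.Diagnostic{
		Range: protocol.Range{
			Start: protocol.Position{Line: 9, Character: 0},
			End:   protocol.Position{Line: 9, Character: 8},
		},
		Severity: protocol.DiagnosticSeverityWarning,
		Code:     "deprecated-symbol", // int32 | string, hence interface{}
		Source:   "example-lint",
		Message:  "oldName is deprecated; use newName instead",
		Tags:     []protocol.DiagnosticTag{protocol.DiagnosticTagDeprecated},
	}

	params := protocol.PublishDiagnosticsParams{
		URI:         protocol.DocumentURI("file:///tmp/example.go"),
		Version:     3,
		Diagnostics: []protocol.Diagnostic{diag},
	}
	// Severity prints via its String method: "Warning".
	fmt.Printf("%d diagnostic(s) for %s, severity %s\n",
		len(params.Diagnostics), params.URI, diag.Severity)
}
```

+// InitializeParams params of Initialize request.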
+type InitializeParams struct {
+	WorkDoneProgressParams
+
+	// ProcessID is the process Id of the parent process that started
+	// the server. Is null if the process has not been started by another process.
+	// If the parent process is not alive then the server should exit its process (see exit notification).
+	ProcessID int32 `json:"processId"`
+
+	// ClientInfo is the information about the client.
+	//
+	// @since 3.15.0
+	ClientInfo *ClientInfo `json:"clientInfo,omitempty"`
+
+	// Locale is the locale the client is currently showing the user interface
+	// in. This is not necessarily the locale of the operating
+	// system.
+	//
+	// Uses IETF language tags as the value's syntax
+	// (See https://en.wikipedia.org/wiki/IETF_language_tag)
+	//
+	// @since 3.16.0.
+	Locale string `json:"locale,omitempty"`
+
+	// RootPath is the rootPath of the workspace. Is null
+	// if no folder is open.
+	//
+	// Deprecated: Use RootURI instead.
+	RootPath string `json:"rootPath,omitempty"`
+
+	// RootURI is the rootUri of the workspace. Is null if no
+	// folder is open. If both `rootPath` and `rootUri` are set,
+	// `rootUri` wins.
+	//
+	// Deprecated: Use WorkspaceFolders instead.
+	RootURI DocumentURI `json:"rootUri,omitempty"`
+
+	// InitializationOptions user provided initialization options.
+	InitializationOptions interface{} `json:"initializationOptions,omitempty"`
+
+	// Capabilities is the capabilities provided by the client (editor or tool).
+	Capabilities ClientCapabilities `json:"capabilities"`
+
+	// Trace is the initial trace setting. If omitted trace is disabled ('off').
+	Trace TraceValue `json:"trace,omitempty"`
+
+	// WorkspaceFolders is the workspace folders configured in the client when the server starts.
+	// This property is only available if the client supports workspace folders.
+	// It can be `null` if the client supports workspace folders but none are
+	// configured.
+	//
+	// @since 3.6.0.
+	WorkspaceFolders []WorkspaceFolder `json:"workspaceFolders,omitempty"`
+}
+
+// InitializeResult is the result of the Initialize request.
+type InitializeResult struct {
+	// Capabilities is the capabilities the language server provides.
+	Capabilities ServerCapabilities `json:"capabilities"`
+
+	// ServerInfo Information about the server.
+	//
+	// @since 3.15.0.
+	ServerInfo *ServerInfo `json:"serverInfo,omitempty"`
+}
+
+// LogTraceParams params of LogTrace notification.
+//
+// @since 3.16.0.
+type LogTraceParams struct {
+	// Message is the message to be logged.
+	Message string `json:"message"`
+
+	// Verbose is the additional information that can be computed if the "trace" configuration
+	// is set to "verbose".
+	Verbose TraceValue `json:"verbose,omitempty"`
+}
+
+// SetTraceParams params of SetTrace notification.
+//
+// @since 3.16.0.
+type SetTraceParams struct {
+	// Value is the new value that should be assigned to the trace setting.
+	Value TraceValue `json:"value"`
+}
+
+// FileOperationPatternKind is a pattern kind describing if a glob pattern matches a file, a folder, or
+// both.
+//
+// @since 3.16.0.
+type FileOperationPatternKind string
+
+// list of FileOperationPatternKind.
+const (
+	// FileOperationPatternKindFile is the pattern matches a file only.
+	FileOperationPatternKindFile FileOperationPatternKind = "file"
+
+	// FileOperationPatternKindFolder is the pattern matches a folder only.
+	FileOperationPatternKindFolder FileOperationPatternKind = "folder"
+)
+
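For reviewers skimming the handshake types, a minimal sketch of a client-side InitializeParams (the WorkspaceFolder type and its URI/Name fields are assumed from the package's workspace definitions, which sit outside this hunk):

```go
package main

import (
	"fmt"
	"os"

	"go.lsp.dev/protocol"
)

func main() {
	params := protocol.InitializeParams{
		ProcessID: int32(os.Getpid()),
		ClientInfo: &protocol.ClientInfo{
			Name:    "example-client",
			Version: "0.0.1",
		},
		Locale: "en-US",
		Trace:  protocol.TraceVerbose,
		// WorkspaceFolders supersedes the deprecated RootPath/RootURI fields.
		WorkspaceFolders: []protocol.WorkspaceFolder{
			{URI: "file:///home/user/project", Name: "project"},
		},
	}
	fmt.Println("initialize from", params.ClientInfo.Name, "pid", params.ProcessID)
}
```

+// FileOperationPatternOptions matching options for the file operation pattern.
+//
+// @since 3.16.0.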
+type FileOperationPatternOptions struct {
+	// IgnoreCase indicates that the pattern should be matched ignoring casing.
+	IgnoreCase bool `json:"ignoreCase,omitempty"`
+}
+
+// FileOperationPattern a pattern to describe in which file operation requests or notifications
+// the server is interested in.
+//
+// @since 3.16.0.
+type FileOperationPattern struct {
+	// The glob pattern to match. Glob patterns can have the following syntax:
+	// - `*` to match one or more characters in a path segment
+	// - `?` to match on one character in a path segment
+	// - `**` to match any number of path segments, including none
+	// - `{}` to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript
+	//   and JavaScript files)
+	// - `[]` to declare a range of characters to match in a path segment
+	//   (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+	// - `[!...]` to negate a range of characters to match in a path segment
+	//   (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but
+	//   not `example.0`)
+	Glob string `json:"glob"`
+
+	// Matches whether to match files or folders with this pattern.
+	//
+	// Matches both if undefined.
+	Matches FileOperationPatternKind `json:"matches,omitempty"`
+
+	// Options additional options used during matching.
+	Options FileOperationPatternOptions `json:"options,omitempty"`
+}
+
+// FileOperationFilter is a filter to describe in which file operation requests or notifications
+// the server is interested in.
+//
+// @since 3.16.0.
+type FileOperationFilter struct {
+	// Scheme is a URI like "file" or "untitled".
+	Scheme string `json:"scheme,omitempty"`
+
+	// Pattern is the actual file operation pattern.
+	Pattern FileOperationPattern `json:"pattern"`
+}
+
+// CreateFilesParams is the parameters sent in notifications/requests for user-initiated creation
+// of files.
+//
+// @since 3.16.0.
+type CreateFilesParams struct {
+	// Files an array of all files/folders created in this operation.
+	Files []FileCreate `json:"files"`
+}
+
+// FileCreate represents information on a file/folder create.
+//
+// @since 3.16.0.
+type FileCreate struct {
+	// URI is a file:// URI for the location of the file/folder being created.
+	URI string `json:"uri"`
+}
+
+// RenameFilesParams is the parameters sent in notifications/requests for user-initiated renames
+// of files.
+//
+// @since 3.16.0.
+type RenameFilesParams struct {
+	// Files an array of all files/folders renamed in this operation. When a folder
+	// is renamed, only the folder will be included, and not its children.
+	Files []FileRename `json:"files"`
+}
+
+// FileRename represents information on a file/folder rename.
+//
+// @since 3.16.0.
+type FileRename struct {
+	// OldURI is a file:// URI for the original location of the file/folder being renamed.
+	OldURI string `json:"oldUri"`
+
+	// NewURI is a file:// URI for the new location of the file/folder being renamed.
+	NewURI string `json:"newUri"`
+}
+
+// DeleteFilesParams is the parameters sent in notifications/requests for user-initiated deletes
+// of files.
+//
+// @since 3.16.0.
+type DeleteFilesParams struct {
+	// Files an array of all files/folders deleted in this operation.
+	Files []FileDelete `json:"files"`
+}
+
+// FileDelete represents information on a file/folder delete.
+//
+// @since 3.16.0.
+type FileDelete struct {
+	// URI is a file:// URI for the location of the file/folder being deleted.
+	URI string `json:"uri"`
+}
+
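Taken together, the file-operation types form a registration filter plus notification payloads. A small sketch using only the types in this hunk:

```go
package main

import (
	"fmt"

	"go.lsp.dev/protocol"
)

func main() {
	// Register interest in created Go files under the file:// scheme.
	filter := protocol.FileOperationFilter{
		Scheme: "file",
		Pattern: protocol.FileOperationPattern{
			Glob:    "**/*.go",
			Matches: protocol.FileOperationPatternKindFile,
			Options: protocol.FileOperationPatternOptions{IgnoreCase: true},
		},
	}

	// The corresponding user-initiated create notification.
	params := protocol.CreateFilesParams{
		Files: []protocol.FileCreate{{URI: "file:///home/user/project/main.go"}},
	}
	fmt.Println(filter.Pattern.Glob, "matched", params.Files[0].URI)
}
```

+// DocumentHighlightParams params of DocumentHighlight request.
+//
+// @since 3.15.0.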
+type DocumentHighlightParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+	PartialResultParams
+}
+
+// DeclarationParams params of Declaration request.
+//
+// @since 3.15.0.
+type DeclarationParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+	PartialResultParams
+}
+
+// DefinitionParams params of Definition request.
+//
+// @since 3.15.0.
+type DefinitionParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+	PartialResultParams
+}
+
+// TypeDefinitionParams params of TypeDefinition request.
+//
+// @since 3.15.0.
+type TypeDefinitionParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+	PartialResultParams
+}
+
+// ImplementationParams params of Implementation request.
+//
+// @since 3.15.0.
+type ImplementationParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+	PartialResultParams
+}
+
+// ShowDocumentParams params to show a document.
+//
+// @since 3.16.0.
+type ShowDocumentParams struct {
+	// URI is the document uri to show.
+	URI URI `json:"uri"`
+
+	// External indicates to show the resource in an external program.
+	// To show, for example, `https://code.visualstudio.com/`
+	// in the default web browser set `external` to `true`.
+	External bool `json:"external,omitempty"`
+
+	// TakeFocus an optional property to indicate whether the editor
+	// showing the document should take focus or not.
+	// Clients might ignore this property if an external
+	// program is started.
+	TakeFocus bool `json:"takeFocus,omitempty"`
+
+	// Selection an optional selection range if the document is a text
+	// document. Clients might ignore the property if an
+	// external program is started or the file is not a text
+	// file.
+	Selection *Range `json:"selection,omitempty"`
+}
+
+// ShowDocumentResult is the result of a show document request.
+//
+// @since 3.16.0.
+type ShowDocumentResult struct {
+	// Success a boolean indicating if the show was successful.
+	Success bool `json:"success"`
+}
+
+// ServerInfo Information about the server.
+//
+// @since 3.15.0.
+type ServerInfo struct {
+	// Name is the name of the server as defined by the server.
+	Name string `json:"name"`
+
+	// Version is the server's version as defined by the server.
+	Version string `json:"version,omitempty"`
+}
+
+// InitializeError known error codes for an "InitializeError".
+type InitializeError struct {
+	// Retry indicates whether the client should execute the following retry logic:
+	// (1) show the message provided by the ResponseError to the user
+	// (2) user selects retry or cancel
+	// (3) if user selected retry the initialize method is sent again.
+	Retry bool `json:"retry,omitempty"`
+}
+
+// ReferencesOptions ReferencesProvider options.
+//
+// @since 3.15.0.
+type ReferencesOptions struct {
+	WorkDoneProgressOptions
+}
+
+// WorkDoneProgressOptions WorkDoneProgress options.
+//
+// @since 3.15.0.
+type WorkDoneProgressOptions struct {
+	WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
+}
+
+// LinkedEditingRangeParams params for the LinkedEditingRange request.
+//
+// @since 3.16.0.
+type LinkedEditingRangeParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+}
+
+// LinkedEditingRanges result of LinkedEditingRange request.
+//
+// @since 3.16.0.
+type LinkedEditingRanges struct {
+	// Ranges a list of ranges that can be renamed together.
+	//
+	// The ranges must have identical length and contain identical text content.
+	//
+	// The ranges cannot overlap.
+	Ranges []Range `json:"ranges"`
+
+	// WordPattern an optional word pattern (regular expression) that describes valid contents for
+	// the given ranges.
+	//
+	// If no pattern is provided, the client configuration's word pattern will be used.
+	WordPattern string `json:"wordPattern,omitempty"`
+}
+
+// MonikerParams params for the Moniker request.
+//
+// @since 3.16.0.
+type MonikerParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+	PartialResultParams
+}
+
+// UniquenessLevel is the Moniker uniqueness level to define scope of the moniker.
+//
+// @since 3.16.0.
+type UniquenessLevel string
+
+// list of UniquenessLevel.
+const (
+	// UniquenessLevelDocument is the moniker is only unique inside a document.
+	UniquenessLevelDocument UniquenessLevel = "document"
+
+	// UniquenessLevelProject is the moniker is unique inside a project for which a dump got created.
+	UniquenessLevelProject UniquenessLevel = "project"
+
+	// UniquenessLevelGroup is the moniker is unique inside the group to which a project belongs.
+	UniquenessLevelGroup UniquenessLevel = "group"
+
+	// UniquenessLevelScheme is the moniker is unique inside the moniker scheme.
+	UniquenessLevelScheme UniquenessLevel = "scheme"
+
+	// UniquenessLevelGlobal is the moniker is globally unique.
+	UniquenessLevelGlobal UniquenessLevel = "global"
+)
+
+// MonikerKind is the moniker kind.
+//
+// @since 3.16.0.
+type MonikerKind string
+
+// list of MonikerKind.
+const (
+	// MonikerKindImport is the moniker represents a symbol that is imported into a project.
+	MonikerKindImport MonikerKind = "import"
+
+	// MonikerKindExport is the moniker represents a symbol that is exported from a project.
+	MonikerKindExport MonikerKind = "export"
+
+	// MonikerKindLocal is the moniker represents a symbol that is local to a project (e.g. a local
+	// variable of a function, a class not visible outside the project, ...).
+	MonikerKindLocal MonikerKind = "local"
+)
+
+// Moniker definition to match LSIF 0.5 moniker definition.
+//
+// @since 3.16.0.
+type Moniker struct {
+	// Scheme is the scheme of the moniker. For example tsc or .Net.
+	Scheme string `json:"scheme"`
+
+	// Identifier is the identifier of the moniker.
+	//
+	// The value is opaque in LSIF however schema owners are allowed to define the structure if they want.
+	Identifier string `json:"identifier"`
+
+	// Unique is the scope in which the moniker is unique.
+	Unique UniquenessLevel `json:"unique"`
+
+	// Kind is the moniker kind if known.
+	Kind MonikerKind `json:"kind,omitempty"`
+}
+
+// StaticRegistrationOptions staticRegistration options to be returned in the initialize request.
+type StaticRegistrationOptions struct {
+	// ID is the id used to register the request. The id can be used to deregister
+	// the request again. See also Registration#id.
+	ID string `json:"id,omitempty"`
+}
+
+// DocumentLinkRegistrationOptions DocumentLinkRegistration options.
+type DocumentLinkRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// ResolveProvider document links have a resolve provider as well.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// InitializedParams params of Initialized notification.
+type InitializedParams struct{}
+
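The moniker types are equally declarative. A short sketch of an export moniker, using only types from this hunk:

```go
package main

import (
	"fmt"

	"go.lsp.dev/protocol"
)

func main() {
	// A symbol exported from a project, unique within its moniker scheme.
	m := protocol.Moniker{
		Scheme:     "gosrc",
		Identifier: "github.com/example/pkg#Foo",
		Unique:     protocol.UniquenessLevelScheme,
		Kind:       protocol.MonikerKindExport,
	}
	fmt.Printf("%s %s (%s, unique per %s)\n", m.Scheme, m.Identifier, m.Kind, m.Unique)
}
```

+// WorkspaceFolders represents a slice of WorkspaceFolder.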
+type WorkspaceFolders []WorkspaceFolder diff --git a/vendor/go.lsp.dev/protocol/handler.go b/vendor/go.lsp.dev/protocol/handler.go new file mode 100644 index 00000000000..ac253a246e2 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/handler.go @@ -0,0 +1,88 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "context" + "fmt" + + "github.com/segmentio/encoding/json" + + "go.lsp.dev/jsonrpc2" + "go.lsp.dev/pkg/xcontext" +) + +// CancelHandler handler of cancelling. +func CancelHandler(handler jsonrpc2.Handler) jsonrpc2.Handler { + handler, canceller := jsonrpc2.CancelHandler(handler) + + h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error { + if req.Method() != MethodCancelRequest { + // TODO(iancottrell): See if we can generate a reply for the request to be cancelled + // at the point of cancellation rather than waiting for gopls to naturally reply. + // To do that, we need to keep track of whether a reply has been sent already and + // be careful about racing between the two paths. + // TODO(iancottrell): Add a test that watches the stream and verifies the response + // for the cancelled request flows. + reply := func(ctx context.Context, resp interface{}, err error) error { + // https://microsoft.github.io/language-server-protocol/specifications/specification-current/#cancelRequest + if ctx.Err() != nil && err == nil { + err = ErrRequestCancelled + } + ctx = xcontext.Detach(ctx) + + return reply(ctx, resp, err) + } + + return handler(ctx, reply, req) + } + + var params CancelParams + if err := json.Unmarshal(req.Params(), ¶ms); err != nil { + return replyParseError(ctx, reply, err) + } + + switch id := params.ID.(type) { + case int32: + canceller(jsonrpc2.NewNumberID(id)) + case string: + canceller(jsonrpc2.NewStringID(id)) + default: + return replyParseError(ctx, reply, fmt.Errorf("request ID %v malformed", id)) + } + + return reply(ctx, nil, nil) + } + + return h +} + +// Handlers default jsonrpc2.Handler. +func Handlers(handler jsonrpc2.Handler) jsonrpc2.Handler { + return CancelHandler( + jsonrpc2.AsyncHandler( + jsonrpc2.ReplyHandler(handler), + ), + ) +} + +// Call calls method to params and result. +func Call(ctx context.Context, conn jsonrpc2.Conn, method string, params, result interface{}) error { + id, err := conn.Call(ctx, method, params, result) + if ctx.Err() != nil { + notifyCancel(ctx, conn, id) + } + + return err +} + +func notifyCancel(ctx context.Context, conn jsonrpc2.Conn, id jsonrpc2.ID) { + ctx = xcontext.Detach(ctx) + // Note that only *jsonrpc2.ID implements json.Marshaler. + conn.Notify(ctx, MethodCancelRequest, &CancelParams{ID: &id}) +} + +func replyParseError(ctx context.Context, reply jsonrpc2.Replier, err error) error { + return reply(ctx, nil, fmt.Errorf("%s: %w", jsonrpc2.ErrParse, err)) +} diff --git a/vendor/go.lsp.dev/protocol/language.go b/vendor/go.lsp.dev/protocol/language.go new file mode 100644 index 00000000000..221d72aa2cc --- /dev/null +++ b/vendor/go.lsp.dev/protocol/language.go @@ -0,0 +1,1316 @@ +// SPDX-FileCopyrightText: 2019 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "strconv" +) + +// CompletionParams params of Completion request. +type CompletionParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams + + // Context is the completion context. 
This is only available if the client specifies
+	// to send this using `ClientCapabilities.textDocument.completion.contextSupport === true`
+	Context *CompletionContext `json:"context,omitempty"`
+}
+
+// CompletionTriggerKind how a completion was triggered.
+type CompletionTriggerKind float64
+
+const (
+	// CompletionTriggerKindInvoked completion was triggered by typing an identifier (24x7 code
+	// complete), manual invocation (e.g. Ctrl+Space) or via API.
+	CompletionTriggerKindInvoked CompletionTriggerKind = 1
+
+	// CompletionTriggerKindTriggerCharacter completion was triggered by a trigger character specified by
+	// the `triggerCharacters` properties of the `CompletionRegistrationOptions`.
+	CompletionTriggerKindTriggerCharacter CompletionTriggerKind = 2
+
+	// CompletionTriggerKindTriggerForIncompleteCompletions completion was re-triggered as the current completion list is incomplete.
+	CompletionTriggerKindTriggerForIncompleteCompletions CompletionTriggerKind = 3
+)
+
+// String implements fmt.Stringer.
+func (k CompletionTriggerKind) String() string {
+	switch k {
+	case CompletionTriggerKindInvoked:
+		return "Invoked"
+	case CompletionTriggerKindTriggerCharacter:
+		return "TriggerCharacter"
+	case CompletionTriggerKindTriggerForIncompleteCompletions:
+		return "TriggerForIncompleteCompletions"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// CompletionContext contains additional information about the context in which a completion request is triggered.
+type CompletionContext struct {
+	// TriggerCharacter is the trigger character (a single character) that triggered code complete.
+	// Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter`
+	TriggerCharacter string `json:"triggerCharacter,omitempty"`
+
+	// TriggerKind how the completion was triggered.
+	TriggerKind CompletionTriggerKind `json:"triggerKind"`
+}
+
+// CompletionList represents a collection of [completion items](#CompletionItem) to be presented
+// in the editor.
+type CompletionList struct {
+	// IsIncomplete indicates this list is not complete. Further typing should result in recomputing
+	// this list.
+	IsIncomplete bool `json:"isIncomplete"`
+
+	// Items is the completion items.
+	Items []CompletionItem `json:"items"`
+}
+
+// InsertTextFormat defines whether the insert text in a completion item should be interpreted as
+// plain text or a snippet.
+type InsertTextFormat float64
+
+const (
+	// InsertTextFormatPlainText is the primary text to be inserted is treated as a plain string.
+	InsertTextFormatPlainText InsertTextFormat = 1
+
+	// InsertTextFormatSnippet is the primary text to be inserted is treated as a snippet.
+	//
+	// A snippet can define tab stops and placeholders with `$1`, `$2`
+	// and `${3:foo}`. `$0` defines the final tab stop, it defaults to
+	// the end of the snippet. Placeholders with equal identifiers are linked,
+	// that is typing in one will update others too.
+	InsertTextFormatSnippet InsertTextFormat = 2
+)
+
+// String implements fmt.Stringer.
+func (tf InsertTextFormat) String() string {
+	switch tf {
+	case InsertTextFormatPlainText:
+		return "PlainText"
+	case InsertTextFormatSnippet:
+		return "Snippet"
+	default:
+		return strconv.FormatFloat(float64(tf), 'f', -10, 64)
+	}
+}
+
+// InsertReplaceEdit is a special text edit to provide an insert and a replace operation.
+//
+// @since 3.16.0.
+type InsertReplaceEdit struct {
+	// NewText is the string to be inserted.
+	NewText string `json:"newText"`
+
+	// Insert is the range if the insert is requested.
+	Insert Range `json:"insert"`
+
+	// Replace is the range if the replace is requested.
+	Replace Range `json:"replace"`
+}
+
+// InsertTextMode how whitespace and indentation is handled during completion
+// item insertion.
+//
+// @since 3.16.0.
+type InsertTextMode float64
+
+const (
+	// InsertTextModeAsIs means the insertion or replace string is taken as is. If the
+	// value is multi line the lines below the cursor will be
+	// inserted using the indentation defined in the string value.
+	// The client will not apply any kind of adjustments to the
+	// string.
+	InsertTextModeAsIs InsertTextMode = 1
+
+	// InsertTextModeAdjustIndentation means the editor adjusts leading whitespace of new lines so that
+	// they match the indentation up to the cursor of the line for
+	// which the item is accepted.
+	//
+	// Consider a line like this: <2tabs><3tabs>foo. Accepting a
+	// multi line completion item is indented using 2 tabs and all
+	// following lines inserted will be indented using 2 tabs as well.
+	InsertTextModeAdjustIndentation InsertTextMode = 2
+)
+
+// String returns a string representation of the InsertTextMode.
+func (k InsertTextMode) String() string {
+	switch k {
+	case InsertTextModeAsIs:
+		return "AsIs"
+	case InsertTextModeAdjustIndentation:
+		return "AdjustIndentation"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// CompletionItem item of CompletionList.
+type CompletionItem struct {
+	// AdditionalTextEdits an optional array of additional text edits that are applied when
+	// selecting this completion. Edits must not overlap (including the same insert position)
+	// with the main edit nor with themselves.
+	//
+	// Additional text edits should be used to change text unrelated to the current cursor position
+	// (for example adding an import statement at the top of the file if the completion item will
+	// insert an unqualified type).
+	AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
+
+	// Command an optional command that is executed *after* inserting this completion. *Note* that
+	// additional modifications to the current document should be described with the
+	// additionalTextEdits-property.
+	Command *Command `json:"command,omitempty"`
+
+	// CommitCharacters an optional set of characters that when pressed while this completion is active will accept it first and
+	// then type that character. *Note* that all commit characters should have `length=1` and that superfluous
+	// characters will be ignored.
+	CommitCharacters []string `json:"commitCharacters,omitempty"`
+
+	// Tags is the tag for this completion item.
+	//
+	// @since 3.15.0.
+	Tags []CompletionItemTag `json:"tags,omitempty"`
+
+	// Data is a data entry field that is preserved on a completion item between
+	// a completion and a completion resolve request.
+	Data interface{} `json:"data,omitempty"`
+
+	// Deprecated indicates if this item is deprecated.
+	Deprecated bool `json:"deprecated,omitempty"`
+
+	// Detail a human-readable string with additional information
+	// about this item, like type or symbol information.
+	Detail string `json:"detail,omitempty"`
+
+	// Documentation a human-readable string that represents a doc-comment.
+	Documentation interface{} `json:"documentation,omitempty"`
+
+	// FilterText a string that should be used when filtering a set of
+	// completion items. When `falsy` the label is used.
+	FilterText string `json:"filterText,omitempty"`
+
+	// InsertText a string that should be inserted into a document when selecting
+	// this completion. When `falsy` the label is used.
+	//
+	// The `insertText` is subject to interpretation by the client side.
+	// Some tools might not take the string literally. For example
+	// VS Code when code complete is requested in this example `con`
+	// and a completion item with an `insertText` of `console` is provided it
+	// will only insert `sole`. Therefore it is recommended to use `textEdit` instead
+	// since it avoids additional client side interpretation.
+	InsertText string `json:"insertText,omitempty"`
+
+	// InsertTextFormat is the format of the insert text. The format applies to both the `insertText` property
+	// and the `newText` property of a provided `textEdit`.
+	InsertTextFormat InsertTextFormat `json:"insertTextFormat,omitempty"`
+
+	// InsertTextMode how whitespace and indentation is handled during completion
+	// item insertion. If not provided the client's default value depends on
+	// the `textDocument.completion.insertTextMode` client capability.
+	//
+	// @since 3.16.0.
+	InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"`
+
+	// Kind is the kind of this completion item. Based on the kind
+	// an icon is chosen by the editor.
+	Kind CompletionItemKind `json:"kind,omitempty"`
+
+	// Label is the label of this completion item. By default
+	// also the text that is inserted when selecting
+	// this completion.
+	Label string `json:"label"`
+
+	// Preselect select this item when showing.
+	//
+	// *Note* that only one completion item can be selected and that the
+	// tool / client decides which item that is. The rule is that the *first*
+	// item of those that match best is selected.
+	Preselect bool `json:"preselect,omitempty"`
+
+	// SortText a string that should be used when comparing this item
+	// with other items. When `falsy` the label is used.
+	SortText string `json:"sortText,omitempty"`
+
+	// TextEdit an edit which is applied to a document when selecting this completion. When an edit is provided the value of
+	// `insertText` is ignored.
+	//
+	// NOTE: The range of the edit must be a single line range and it must contain the position at which completion
+	// has been requested.
+	//
+	// Most editors support two different operations when accepting a completion
+	// item. One is to insert a completion text and the other is to replace an
+	// existing text with a completion text. Since this can usually not be
+	// predetermined by a server it can report both ranges. Clients need to
+	// signal support for `InsertReplaceEdits` via the
+	// "textDocument.completion.insertReplaceSupport" client capability
+	// property.
+	//
+	// NOTE 1: The text edit's range as well as both ranges from an insert
+	// replace edit must be a [single line] and they must contain the position
+	// at which completion has been requested.
+	//
+	// NOTE 2: If an "InsertReplaceEdit" is returned the edit's insert range
+	// must be a prefix of the edit's replace range, that means it must be
+	// contained and starting at the same position.
+	//
+	// @since 3.16.0 additional type "InsertReplaceEdit".
+	TextEdit *TextEdit `json:"textEdit,omitempty"` // *TextEdit | *InsertReplaceEdit
+}
+
+// CompletionItemKind is the completion item kind values the client supports. When this
+// property exists the client also guarantees that it will
+// handle values outside its set gracefully and falls back
+// to a default value when unknown.
+//
+// If this property is not present the client only supports
+// the completion item kinds from `Text` to `Reference` as defined in
+// the initial version of the protocol.
+type CompletionItemKind float64
+
+const (
+	// CompletionItemKindText text completion kind.
+	CompletionItemKindText CompletionItemKind = 1
+	// CompletionItemKindMethod method completion kind.
+	CompletionItemKindMethod CompletionItemKind = 2
+	// CompletionItemKindFunction function completion kind.
+	CompletionItemKindFunction CompletionItemKind = 3
+	// CompletionItemKindConstructor constructor completion kind.
+	CompletionItemKindConstructor CompletionItemKind = 4
+	// CompletionItemKindField field completion kind.
+	CompletionItemKindField CompletionItemKind = 5
+	// CompletionItemKindVariable variable completion kind.
+	CompletionItemKindVariable CompletionItemKind = 6
+	// CompletionItemKindClass class completion kind.
+	CompletionItemKindClass CompletionItemKind = 7
+	// CompletionItemKindInterface interface completion kind.
+	CompletionItemKindInterface CompletionItemKind = 8
+	// CompletionItemKindModule module completion kind.
+	CompletionItemKindModule CompletionItemKind = 9
+	// CompletionItemKindProperty property completion kind.
+	CompletionItemKindProperty CompletionItemKind = 10
+	// CompletionItemKindUnit unit completion kind.
+	CompletionItemKindUnit CompletionItemKind = 11
+	// CompletionItemKindValue value completion kind.
+	CompletionItemKindValue CompletionItemKind = 12
+	// CompletionItemKindEnum enum completion kind.
+	CompletionItemKindEnum CompletionItemKind = 13
+	// CompletionItemKindKeyword keyword completion kind.
+	CompletionItemKindKeyword CompletionItemKind = 14
+	// CompletionItemKindSnippet snippet completion kind.
+	CompletionItemKindSnippet CompletionItemKind = 15
+	// CompletionItemKindColor color completion kind.
+	CompletionItemKindColor CompletionItemKind = 16
+	// CompletionItemKindFile file completion kind.
+	CompletionItemKindFile CompletionItemKind = 17
+	// CompletionItemKindReference reference completion kind.
+	CompletionItemKindReference CompletionItemKind = 18
+	// CompletionItemKindFolder folder completion kind.
+	CompletionItemKindFolder CompletionItemKind = 19
+	// CompletionItemKindEnumMember enum member completion kind.
+	CompletionItemKindEnumMember CompletionItemKind = 20
+	// CompletionItemKindConstant constant completion kind.
+	CompletionItemKindConstant CompletionItemKind = 21
+	// CompletionItemKindStruct struct completion kind.
+	CompletionItemKindStruct CompletionItemKind = 22
+	// CompletionItemKindEvent event completion kind.
+	CompletionItemKindEvent CompletionItemKind = 23
+	// CompletionItemKindOperator operator completion kind.
+	CompletionItemKindOperator CompletionItemKind = 24
+	// CompletionItemKindTypeParameter type parameter completion kind.
+	CompletionItemKindTypeParameter CompletionItemKind = 25
+)
+
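Pulling the completion types together, a compact sketch of a server-side completion response built from the items, kinds, and insert-text formats defined above:

```go
package main

import (
	"fmt"

	"go.lsp.dev/protocol"
)

func main() {
	list := protocol.CompletionList{
		// Incomplete lists are recomputed as the user keeps typing.
		IsIncomplete: true,
		Items: []protocol.CompletionItem{
			{
				Label:            "Println",
				Kind:             protocol.CompletionItemKindFunction,
				Detail:           "func(a ...interface{}) (n int, err error)",
				InsertText:       "Println($1)$0", // snippet tab stops
				InsertTextFormat: protocol.InsertTextFormatSnippet,
				SortText:         "0001",
			},
			{Label: "Print", Kind: protocol.CompletionItemKindFunction},
		},
	}
	for _, item := range list.Items {
		fmt.Println(item.Label, item.Kind) // Kind prints via its String method
	}
}
```

+// String implements fmt.Stringer.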
+//nolint:cyclop
+func (k CompletionItemKind) String() string {
+	switch k {
+	case CompletionItemKindText:
+		return "Text"
+	case CompletionItemKindMethod:
+		return "Method"
+	case CompletionItemKindFunction:
+		return "Function"
+	case CompletionItemKindConstructor:
+		return "Constructor"
+	case CompletionItemKindField:
+		return "Field"
+	case CompletionItemKindVariable:
+		return "Variable"
+	case CompletionItemKindClass:
+		return "Class"
+	case CompletionItemKindInterface:
+		return "Interface"
+	case CompletionItemKindModule:
+		return "Module"
+	case CompletionItemKindProperty:
+		return "Property"
+	case CompletionItemKindUnit:
+		return "Unit"
+	case CompletionItemKindValue:
+		return "Value"
+	case CompletionItemKindEnum:
+		return "Enum"
+	case CompletionItemKindKeyword:
+		return "Keyword"
+	case CompletionItemKindSnippet:
+		return "Snippet"
+	case CompletionItemKindColor:
+		return "Color"
+	case CompletionItemKindFile:
+		return "File"
+	case CompletionItemKindReference:
+		return "Reference"
+	case CompletionItemKindFolder:
+		return "Folder"
+	case CompletionItemKindEnumMember:
+		return "EnumMember"
+	case CompletionItemKindConstant:
+		return "Constant"
+	case CompletionItemKindStruct:
+		return "Struct"
+	case CompletionItemKindEvent:
+		return "Event"
+	case CompletionItemKindOperator:
+		return "Operator"
+	case CompletionItemKindTypeParameter:
+		return "TypeParameter"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// CompletionItemTag completion item tags are extra annotations that tweak the rendering of a completion
+// item.
+//
+// @since 3.15.0.
+type CompletionItemTag float64
+
+// list of CompletionItemTag.
+const (
+	// CompletionItemTagDeprecated renders a completion as obsolete, usually using a strike-out.
+	CompletionItemTagDeprecated CompletionItemTag = 1
+)
+
+// String returns a string representation of the type.
+func (c CompletionItemTag) String() string {
+	switch c {
+	case CompletionItemTagDeprecated:
+		return "Deprecated"
+	default:
+		return strconv.FormatFloat(float64(c), 'f', -10, 64)
+	}
+}
+
+// CompletionRegistrationOptions CompletionRegistration options.
+type CompletionRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// TriggerCharacters most tools trigger completion request automatically without explicitly requesting
+	// it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user
+	// starts to type an identifier. For example if the user types `c` in a JavaScript file
+	// code complete will automatically pop up and present `console` besides others as a
+	// completion item. Characters that make up identifiers don't need to be listed here.
+	//
+	// If code complete should automatically be triggered on characters not being valid inside
+	// an identifier (for example `.` in JavaScript) list them in `triggerCharacters`.
+	TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+
+	// ResolveProvider is the server provides support to resolve additional
+	// information for a completion item.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// HoverParams params of Hover request.
+//
+// @since 3.15.0.
+type HoverParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+}
+
+// Hover is the result of a hover request.
+type Hover struct {
+	// Contents is the hover's content.
+	Contents MarkupContent `json:"contents"`
+
+	// Range an optional range is a range inside a text document
+	// that is used to visualize a hover, e.g. by changing the background color.
+	Range *Range `json:"range,omitempty"`
+}
+
+// SignatureHelpParams params of SignatureHelp request.
+//
+// @since 3.15.0.
+type SignatureHelpParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+
+	// Context is the signature help context.
+	//
+	// This is only available if the client specifies to send this using the
+	// client capability `textDocument.signatureHelp.contextSupport === true`.
+	//
+	// @since 3.15.0.
+	Context *SignatureHelpContext `json:"context,omitempty"`
+}
+
+// SignatureHelpTriggerKind is how a signature help was triggered.
+//
+// @since 3.15.0.
+type SignatureHelpTriggerKind float64
+
+// list of SignatureHelpTriggerKind.
+const (
+	// SignatureHelpTriggerKindInvoked is the signature help was invoked manually by the user or by a command.
+	SignatureHelpTriggerKindInvoked SignatureHelpTriggerKind = 1
+
+	// SignatureHelpTriggerKindTriggerCharacter is the signature help was triggered by a trigger character.
+	SignatureHelpTriggerKindTriggerCharacter SignatureHelpTriggerKind = 2
+
+	// SignatureHelpTriggerKindContentChange is the signature help was triggered by the cursor moving or
+	// by the document content changing.
+	SignatureHelpTriggerKindContentChange SignatureHelpTriggerKind = 3
+)
+
+// String returns a string representation of the type.
+func (s SignatureHelpTriggerKind) String() string {
+	switch s {
+	case SignatureHelpTriggerKindInvoked:
+		return "Invoked"
+	case SignatureHelpTriggerKindTriggerCharacter:
+		return "TriggerCharacter"
+	case SignatureHelpTriggerKindContentChange:
+		return "ContentChange"
+	default:
+		return strconv.FormatFloat(float64(s), 'f', -10, 64)
+	}
+}
+
+// SignatureHelpContext is the additional information about the context in which a
+// signature help request was triggered.
+//
+// @since 3.15.0.
+type SignatureHelpContext struct {
+	// TriggerKind is the action that caused signature help to be triggered.
+	TriggerKind SignatureHelpTriggerKind `json:"triggerKind"`
+
+	// TriggerCharacter is the character that caused signature help to be triggered.
+	//
+	// This is undefined when
+	// TriggerKind != SignatureHelpTriggerKindTriggerCharacter
+	TriggerCharacter string `json:"triggerCharacter,omitempty"`
+
+	// IsRetrigger is `true` if signature help was already showing when it was triggered.
+	//
+	// Retriggers occur when the signature help is already active and can be
+	// caused by actions such as typing a trigger character, a cursor move,
+	// or document content changes.
+	IsRetrigger bool `json:"isRetrigger"`
+
+	// ActiveSignatureHelp is the currently active SignatureHelp.
+	//
+	// The `activeSignatureHelp` has its `SignatureHelp.activeSignature` field
+	// updated based on the user navigating through available signatures.
+	ActiveSignatureHelp *SignatureHelp `json:"activeSignatureHelp,omitempty"`
+}
+
+// SignatureHelp signature help represents the signature of something
+// callable. There can be multiple signatures but only one
+// active and only one active parameter.
+type SignatureHelp struct {
+	// Signatures one or more signatures.
+	Signatures []SignatureInformation `json:"signatures"`
+
+	// ActiveParameter is the active parameter of the active signature. If omitted or the value
+	// lies outside the range of `signatures[activeSignature].parameters`
+	// it defaults to 0 if the active signature has parameters. If
+	// the active signature has no parameters it is ignored.
+	// In a future version of the protocol this property might become
+	// mandatory to better express the active parameter if the
+	// active signature does have any.
+	ActiveParameter uint32 `json:"activeParameter,omitempty"`
+
+	// ActiveSignature is the active signature. If omitted or the value lies outside the
+	// range of `signatures` the value defaults to zero or is ignored if
+	// `signatures.length === 0`. Whenever possible implementors should
+	// make an active decision about the active signature and shouldn't
+	// rely on a default value.
+	// In a future version of the protocol this property might become
+	// mandatory to better express this.
+	ActiveSignature uint32 `json:"activeSignature,omitempty"`
+}
+
+// SignatureInformation represents the signature of something callable. A signature
+// can have a label, like a function name, a doc-comment, and a set of parameters.
+type SignatureInformation struct {
+	// Label is the label of this signature. Will be shown in
+	// the UI.
+	//
+	// @since 3.16.0.
+	Label string `json:"label"`
+
+	// Documentation is the human-readable doc-comment of this signature. Will be shown
+	// in the UI but can be omitted.
+	//
+	// @since 3.16.0.
+	Documentation interface{} `json:"documentation,omitempty"` // string | *MarkupContent
+
+	// Parameters is the parameters of this signature.
+	//
+	// @since 3.16.0.
+	Parameters []ParameterInformation `json:"parameters,omitempty"`
+
+	// ActiveParameter is the index of the active parameter.
+	//
+	// If provided, this is used in place of `SignatureHelp.activeParameter`.
+	//
+	// @since 3.16.0.
+	ActiveParameter uint32 `json:"activeParameter,omitempty"`
+}
+
+// ParameterInformation represents a parameter of a callable-signature. A parameter can
+// have a label and a doc-comment.
+type ParameterInformation struct {
+	// Label is the label of this parameter information.
+	//
+	// Either a string or an inclusive start and exclusive end offsets within its containing
+	// signature label. (see SignatureInformation.label). The offsets are based on a UTF-16
+	// string representation as "Position" and "Range" do.
+	//
+	// *Note*: a label of type string should be a substring of its containing signature label.
+	// Its intended use case is to highlight the parameter label part in the "SignatureInformation.label".
+	Label string `json:"label"` // string | [uint32, uint32]
+
+	// Documentation is the human-readable doc-comment of this parameter. Will be shown
+	// in the UI but can be omitted.
+	Documentation interface{} `json:"documentation,omitempty"` // string | MarkupContent
+}
+
+// SignatureHelpRegistrationOptions SignatureHelp Registration options.
+type SignatureHelpRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// TriggerCharacters is the characters that trigger signature help
+	// automatically.
+	TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+}
+
+// ReferenceParams params of References request.
+//
+// @since 3.15.0.
+type ReferenceParams struct {
+	TextDocumentPositionParams
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// Context is the ReferenceParams context.
+	Context ReferenceContext `json:"context"`
+}
+
+// ReferenceContext context of ReferenceParams.
+type ReferenceContext struct {
+	// IncludeDeclaration include the declaration of the current symbol.
+	IncludeDeclaration bool `json:"includeDeclaration"`
+}
+
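As with completion, signature help is assembled from the structs above. A minimal sketch, using only types from this hunk:

```go
package main

import (
	"fmt"

	"go.lsp.dev/protocol"
)

func main() {
	help := protocol.SignatureHelp{
		Signatures: []protocol.SignatureInformation{
			{
				Label:         "Atoi(s string) (int, error)",
				Documentation: "Atoi converts the string s to an int.", // string | *MarkupContent
				Parameters: []protocol.ParameterInformation{
					{Label: "s string", Documentation: "the string to convert"},
				},
			},
		},
		ActiveSignature: 0,
		ActiveParameter: 0,
	}
	sig := help.Signatures[help.ActiveSignature]
	fmt.Println(sig.Label, "- active parameter:", sig.Parameters[help.ActiveParameter].Label)
}
```

+// DocumentHighlight a document highlight is a range inside a text document which deserves
+// special attention. Usually a document highlight is visualized by changing
+// the background color of its range.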
+// DocumentHighlight is a range inside a text document which deserves
+// special attention. Usually a document highlight is visualized by changing
+// the background color of its range.
+type DocumentHighlight struct {
+ // Range is the range this highlight applies to.
+ Range Range `json:"range"`
+
+ // Kind is the highlight kind, default is DocumentHighlightKind.Text.
+ Kind DocumentHighlightKind `json:"kind,omitempty"`
+}
+
+// DocumentHighlightKind a document highlight kind.
+type DocumentHighlightKind float64
+
+const (
+ // DocumentHighlightKindText a textual occurrence.
+ DocumentHighlightKindText DocumentHighlightKind = 1
+
+ // DocumentHighlightKindRead read-access of a symbol, like reading a variable.
+ DocumentHighlightKindRead DocumentHighlightKind = 2
+
+ // DocumentHighlightKindWrite write-access of a symbol, like writing to a variable.
+ DocumentHighlightKindWrite DocumentHighlightKind = 3
+)
+
+// String implements fmt.Stringer.
+func (k DocumentHighlightKind) String() string {
+ switch k {
+ case DocumentHighlightKindText:
+  return "Text"
+ case DocumentHighlightKindRead:
+  return "Read"
+ case DocumentHighlightKindWrite:
+  return "Write"
+ default:
+  return strconv.FormatFloat(float64(k), 'f', -10, 64)
+ }
+}
+
+// DocumentSymbolParams params of Document Symbols request.
+type DocumentSymbolParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// SymbolKind is the kind of a symbol.
+//
+// The symbol kind values the client supports. When this
+// property exists the client also guarantees that it will
+// handle values outside its set gracefully and falls back
+// to a default value when unknown.
+//
+// If this property is not present the client only supports
+// the symbol kinds from `File` to `Array` as defined in
+// the initial version of the protocol.
+type SymbolKind float64
+
+const (
+ // SymbolKindFile symbol of file.
+ SymbolKindFile SymbolKind = 1
+ // SymbolKindModule symbol of module.
+ SymbolKindModule SymbolKind = 2
+ // SymbolKindNamespace symbol of namespace.
+ SymbolKindNamespace SymbolKind = 3
+ // SymbolKindPackage symbol of package.
+ SymbolKindPackage SymbolKind = 4
+ // SymbolKindClass symbol of class.
+ SymbolKindClass SymbolKind = 5
+ // SymbolKindMethod symbol of method.
+ SymbolKindMethod SymbolKind = 6
+ // SymbolKindProperty symbol of property.
+ SymbolKindProperty SymbolKind = 7
+ // SymbolKindField symbol of field.
+ SymbolKindField SymbolKind = 8
+ // SymbolKindConstructor symbol of constructor.
+ SymbolKindConstructor SymbolKind = 9
+ // SymbolKindEnum symbol of enum.
+ SymbolKindEnum SymbolKind = 10
+ // SymbolKindInterface symbol of interface.
+ SymbolKindInterface SymbolKind = 11
+ // SymbolKindFunction symbol of function.
+ SymbolKindFunction SymbolKind = 12
+ // SymbolKindVariable symbol of variable.
+ SymbolKindVariable SymbolKind = 13
+ // SymbolKindConstant symbol of constant.
+ SymbolKindConstant SymbolKind = 14
+ // SymbolKindString symbol of string.
+ SymbolKindString SymbolKind = 15
+ // SymbolKindNumber symbol of number.
+ SymbolKindNumber SymbolKind = 16
+ // SymbolKindBoolean symbol of boolean.
+ SymbolKindBoolean SymbolKind = 17
+ // SymbolKindArray symbol of array.
+ SymbolKindArray SymbolKind = 18
+ // SymbolKindObject symbol of object.
+ SymbolKindObject SymbolKind = 19
+ // SymbolKindKey symbol of key.
+ SymbolKindKey SymbolKind = 20
+ // SymbolKindNull symbol of null.
+ SymbolKindNull SymbolKind = 21
+ // SymbolKindEnumMember symbol of enum member.
+ SymbolKindEnumMember SymbolKind = 22
+ // SymbolKindStruct symbol of struct.
+ SymbolKindStruct SymbolKind = 23
+ // SymbolKindEvent symbol of event.
+ SymbolKindEvent SymbolKind = 24
+ // SymbolKindOperator symbol of operator.
+ SymbolKindOperator SymbolKind = 25
+ // SymbolKindTypeParameter symbol of type parameter.
+ SymbolKindTypeParameter SymbolKind = 26
+)
+
+// String implements fmt.Stringer.
+//nolint:cyclop
+func (k SymbolKind) String() string {
+ switch k {
+ case SymbolKindFile:
+  return "File"
+ case SymbolKindModule:
+  return "Module"
+ case SymbolKindNamespace:
+  return "Namespace"
+ case SymbolKindPackage:
+  return "Package"
+ case SymbolKindClass:
+  return "Class"
+ case SymbolKindMethod:
+  return "Method"
+ case SymbolKindProperty:
+  return "Property"
+ case SymbolKindField:
+  return "Field"
+ case SymbolKindConstructor:
+  return "Constructor"
+ case SymbolKindEnum:
+  return "Enum"
+ case SymbolKindInterface:
+  return "Interface"
+ case SymbolKindFunction:
+  return "Function"
+ case SymbolKindVariable:
+  return "Variable"
+ case SymbolKindConstant:
+  return "Constant"
+ case SymbolKindString:
+  return "String"
+ case SymbolKindNumber:
+  return "Number"
+ case SymbolKindBoolean:
+  return "Boolean"
+ case SymbolKindArray:
+  return "Array"
+ case SymbolKindObject:
+  return "Object"
+ case SymbolKindKey:
+  return "Key"
+ case SymbolKindNull:
+  return "Null"
+ case SymbolKindEnumMember:
+  return "EnumMember"
+ case SymbolKindStruct:
+  return "Struct"
+ case SymbolKindEvent:
+  return "Event"
+ case SymbolKindOperator:
+  return "Operator"
+ case SymbolKindTypeParameter:
+  return "TypeParameter"
+ default:
+  return strconv.FormatFloat(float64(k), 'f', -10, 64)
+ }
+}
+
+// SymbolTag symbol tags are extra annotations that tweak the rendering of a symbol.
+//
+// @since 3.16.0.
+type SymbolTag float64
+
+// list of SymbolTag.
+const (
+ // SymbolTagDeprecated render a symbol as obsolete, usually using a strike-out.
+ SymbolTagDeprecated SymbolTag = 1
+)
+
+// String returns a string representation of the SymbolTag.
+func (k SymbolTag) String() string {
+ switch k {
+ case SymbolTagDeprecated:
+  return "Deprecated"
+ default:
+  return strconv.FormatFloat(float64(k), 'f', -10, 64)
+ }
+}
+
+// DocumentSymbol represents programming constructs like variables, classes, interfaces etc. that appear in a document. Document symbols can be
+// hierarchical and they have two ranges: one that encloses its definition and one that points to its most interesting range,
+// e.g. the range of an identifier.
+type DocumentSymbol struct {
+ // Name is the name of this symbol. Will be displayed in the user interface and therefore must not be
+ // an empty string or a string only consisting of white spaces.
+ Name string `json:"name"`
+
+ // Detail is more detail for this symbol, e.g. the signature of a function.
+ Detail string `json:"detail,omitempty"`
+
+ // Kind is the kind of this symbol.
+ Kind SymbolKind `json:"kind"`
+
+ // Tags for this document symbol.
+ //
+ // @since 3.16.0.
+ Tags []SymbolTag `json:"tags,omitempty"`
+
+ // Deprecated indicates if this symbol is deprecated.
+ Deprecated bool `json:"deprecated,omitempty"`
+
+ // Range is the range enclosing this symbol not including leading/trailing whitespace but everything else
+ // like comments. This information is typically used to determine if the client's cursor is
+ // inside the symbol to reveal the symbol in the UI.
+ Range Range `json:"range"`
+
+ // SelectionRange is the range that should be selected and revealed when this symbol is being picked, e.g. the name of a function.
+ // Must be contained by the `range`.
+ SelectionRange Range `json:"selectionRange"`
+
+ // Children children of this symbol, e.g. properties of a class.
+ Children []DocumentSymbol `json:"children,omitempty"`
+}
+
+// SymbolInformation represents information about programming constructs like variables, classes,
+// interfaces etc.
+type SymbolInformation struct {
+ // Name is the name of this symbol.
+ Name string `json:"name"`
+
+ // Kind is the kind of this symbol.
+ Kind SymbolKind `json:"kind"`
+
+ // Tags for this symbol.
+ //
+ // @since 3.16.0.
+ Tags []SymbolTag `json:"tags,omitempty"`
+
+ // Deprecated indicates if this symbol is deprecated.
+ Deprecated bool `json:"deprecated,omitempty"`
+
+ // Location is the location of this symbol. The location's range is used by a tool
+ // to reveal the location in the editor. If the symbol is selected in the
+ // tool the range's start information is used to position the cursor. So
+ // the range usually spans more than the actual symbol's name and does
+ // normally include things like visibility modifiers.
+ //
+ // The range doesn't have to denote a node range in the sense of an abstract
+ // syntax tree. It can therefore not be used to re-construct a hierarchy of
+ // the symbols.
+ Location Location `json:"location"`
+
+ // ContainerName is the name of the symbol containing this symbol. This information is for
+ // user interface purposes (e.g. to render a qualifier in the user interface
+ // if necessary). It can't be used to re-infer a hierarchy for the document
+ // symbols.
+ ContainerName string `json:"containerName,omitempty"`
+}
+
+// CodeActionParams params for the CodeActionRequest.
+type CodeActionParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the document in which the command was invoked.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // Context carrying additional information.
+ Context CodeActionContext `json:"context"`
+
+ // Range is the range for which the command was invoked.
+ Range Range `json:"range"`
+}
+
+// CodeActionKind is the kind of a code action.
+//
+// The code action kind values the client supports. When this
+// property exists the client also guarantees that it will
+// handle values outside its set gracefully and falls back
+// to a default value when unknown.
+type CodeActionKind string
+
+// A set of predefined code action kinds.
+const (
+ // QuickFix base kind for quickfix actions: 'quickfix'.
+ QuickFix CodeActionKind = "quickfix"
+
+ // Refactor base kind for refactoring actions: 'refactor'.
+ Refactor CodeActionKind = "refactor"
+
+ // RefactorExtract base kind for refactoring extraction actions: 'refactor.extract'
+ //
+ // Example extract actions:
+ //
+ // - Extract method
+ // - Extract function
+ // - Extract variable
+ // - Extract interface from class
+ // - ...
+ RefactorExtract CodeActionKind = "refactor.extract"
+
+ // RefactorInline base kind for refactoring inline actions: 'refactor.inline'
+ //
+ // Example inline actions:
+ //
+ // - Inline function
+ // - Inline variable
+ // - Inline constant
+ // - ...
+ RefactorInline CodeActionKind = "refactor.inline"
+
+ // RefactorRewrite base kind for refactoring rewrite actions: 'refactor.rewrite'
+ //
+ // Example rewrite actions:
+ //
+ // - Convert JavaScript function to class
+ // - Add or remove parameter
+ // - Encapsulate field
+ // - Make method static
+ // - Move method to base class
+ // - ...
+ RefactorRewrite CodeActionKind = "refactor.rewrite"
+
+ // Source base kind for source actions: `source`
+ //
+ // Source code actions apply to the entire file.
+ Source CodeActionKind = "source"
+
+ // SourceOrganizeImports base kind for an organize imports source action: `source.organizeImports`.
+ SourceOrganizeImports CodeActionKind = "source.organizeImports"
+)
+
+// CodeActionContext contains additional diagnostic information about the context in which
+// a code action is run.
+type CodeActionContext struct {
+ // Diagnostics is an array of diagnostics.
+ Diagnostics []Diagnostic `json:"diagnostics"`
+
+ // Only requested kind of actions to return.
+ //
+ // Actions not of this kind are filtered out by the client before being shown. So servers
+ // can omit computing them.
+ Only []CodeActionKind `json:"only,omitempty"`
+}
+
+// CodeAction represents a change that can be performed in code, e.g. to fix
+// a problem or to refactor code.
+type CodeAction struct {
+ // Title is a short, human-readable, title for this code action.
+ Title string `json:"title"`
+
+ // Kind is the kind of the code action.
+ //
+ // Used to filter code actions.
+ Kind CodeActionKind `json:"kind,omitempty"`
+
+ // Diagnostics is the diagnostics that this code action resolves.
+ Diagnostics []Diagnostic `json:"diagnostics,omitempty"`
+
+ // IsPreferred marks this as a preferred action. Preferred actions are used by the `auto fix` command and can be targeted
+ // by keybindings.
+ //
+ // A quick fix should be marked preferred if it properly addresses the underlying error.
+ // A refactoring should be marked preferred if it is the most reasonable choice of actions to take.
+ //
+ // @since 3.15.0.
+ IsPreferred bool `json:"isPreferred,omitempty"`
+
+ // Disabled marks that the code action cannot currently be applied.
+ //
+ // Clients should follow the following guidelines regarding disabled code
+ // actions:
+ //
+ // - Disabled code actions are not shown in automatic lightbulbs code
+ //  action menus.
+ //
+ // - Disabled actions are shown as faded out in the code action menu when
+ //  the user requests a more specific type of code action, such as
+ //  refactorings.
+ //
+ // - If the user has a keybinding that auto applies a code action and only
+ //  disabled code actions are returned, the client should show the user
+ //  an error message with `reason` in the editor.
+ //
+ // @since 3.16.0.
+ Disabled *CodeActionDisable `json:"disabled,omitempty"`
+
+ // Edit is the workspace edit this code action performs.
+ Edit *WorkspaceEdit `json:"edit,omitempty"`
+
+ // Command is a command this code action executes. If a code action
+ // provides an edit and a command, first the edit is
+ // executed and then the command.
+ Command *Command `json:"command,omitempty"`
+
+ // Data is a data entry field that is preserved on a code action between
+ // a "textDocument/codeAction" and a "codeAction/resolve" request.
+ //
+ // @since 3.16.0.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// CodeActionDisable Disable in CodeAction.
+//
+// @since 3.16.0.
+type CodeActionDisable struct {
+ // Reason human readable description of why the code action is currently
+ // disabled.
+ //
+ // This is displayed in the code actions UI.
+ Reason string `json:"reason"`
+}
+
+// CodeActionRegistrationOptions CodeAction Registration options.
+type CodeActionRegistrationOptions struct {
+ TextDocumentRegistrationOptions
+
+ CodeActionOptions
+}
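To make the relationship between diagnostics and code actions concrete, here is a hedged sketch of a quick-fix action; the title and diagnostic are hypothetical, and the `*WorkspaceEdit` payload (defined elsewhere in this package) is left nil because building it depends on server-side document state:

package main

import (
    "fmt"

    "go.lsp.dev/protocol"
)

// quickFix is a minimal sketch of a "quickfix" code action that resolves a
// single diagnostic passed in by the client's textDocument/codeAction request.
func quickFix(diag protocol.Diagnostic) protocol.CodeAction {
    return protocol.CodeAction{
        Title:       "Remove unused variable",
        Kind:        protocol.QuickFix,
        Diagnostics: []protocol.Diagnostic{diag},
        IsPreferred: true, // directly addresses the underlying error
        Edit:        nil,  // a real server would attach a *WorkspaceEdit here
    }
}

func main() {
    fmt.Printf("%+v\n", quickFix(protocol.Diagnostic{Message: "x declared but not used"}))
}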
+// CodeLensParams params of Code Lens request.
+type CodeLensParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the document to request code lens for.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// CodeLens represents a command that should be shown along with
+// source text, like the number of references, a way to run tests, etc.
+//
+// A code lens is _unresolved_ when no command is associated to it. For performance
+// reasons the creation of a code lens and resolving should be done in two stages.
+type CodeLens struct {
+ // Range is the range in which this code lens is valid. Should only span a single line.
+ Range Range `json:"range"`
+
+ // Command is the command this code lens represents.
+ Command *Command `json:"command,omitempty"`
+
+ // Data is a data entry field that is preserved on a code lens item between
+ // a code lens and a code lens resolve request.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// CodeLensRegistrationOptions CodeLens Registration options.
+type CodeLensRegistrationOptions struct {
+ TextDocumentRegistrationOptions
+
+ // ResolveProvider code lens has a resolve provider as well.
+ ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// DocumentLinkParams params of Document Link request.
+type DocumentLinkParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the document to provide document links for.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// DocumentLink is a range in a text document that links to an internal or external resource, like another
+// text document or a web site.
+type DocumentLink struct {
+ // Range is the range this link applies to.
+ Range Range `json:"range"`
+
+ // Target is the uri this link points to. If missing a resolve request is sent later.
+ Target DocumentURI `json:"target,omitempty"`
+
+ // Tooltip is the tooltip text when you hover over this link.
+ //
+ // If a tooltip is provided, it will be displayed in a string that includes instructions on how to
+ // trigger the link, such as `{0} (ctrl + click)`. The specific instructions vary depending on OS,
+ // user settings, and localization.
+ //
+ // @since 3.15.0.
+ Tooltip string `json:"tooltip,omitempty"`
+
+ // Data is a data entry field that is preserved on a document link between a
+ // DocumentLinkRequest and a DocumentLinkResolveRequest.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// DocumentColorParams params of Document Color request.
+type DocumentColorParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the document to format.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// ColorInformation response of Document Color request.
+type ColorInformation struct {
+ // Range is the range in the document where this color appears.
+ Range Range `json:"range"`
+
+ // Color is the actual color value for this color range.
+ Color Color `json:"color"`
+}
+
+// Color represents a color in RGBA space.
+type Color struct {
+ // Alpha is the alpha component of this color in the range [0-1].
+ Alpha float64 `json:"alpha"`
+
+ // Blue is the blue component of this color in the range [0-1].
+ Blue float64 `json:"blue"`
+
+ // Green is the green component of this color in the range [0-1].
+ Green float64 `json:"green"`
+
+ // Red is the red component of this color in the range [0-1].
+ Red float64 `json:"red"`
+}
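Since `Color` components are floats in the [0-1] range, a client or server usually converts them to 8-bit channels for display; a small sketch follows (the rounding policy is an assumption, the protocol does not prescribe one):

package main

import (
    "fmt"

    "go.lsp.dev/protocol"
)

// hexLabel turns a protocol.Color (components in [0-1]) into the #RRGGBB
// label a ColorPresentation might carry; alpha is ignored in this sketch.
func hexLabel(c protocol.Color) string {
    to8 := func(f float64) uint8 { return uint8(f*255 + 0.5) } // round to nearest
    return fmt.Sprintf("#%02X%02X%02X", to8(c.Red), to8(c.Green), to8(c.Blue))
}

func main() {
    fmt.Println(hexLabel(protocol.Color{Red: 1, Green: 0.5, Blue: 0, Alpha: 1})) // #FF8000
}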
+// ColorPresentationParams params of Color Presentation request.
+type ColorPresentationParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // Color is the color information to request presentations for.
+ Color Color `json:"color"`
+
+ // Range is the range where the color would be inserted. Serves as a context.
+ Range Range `json:"range"`
+}
+
+// ColorPresentation response of Color Presentation request.
+type ColorPresentation struct {
+ // Label is the label of this color presentation. It will be shown on the color
+ // picker header. By default this is also the text that is inserted when selecting
+ // this color presentation.
+ Label string `json:"label"`
+
+ // TextEdit an edit which is applied to a document when selecting
+ // this presentation for the color. When `falsy` the label is used.
+ TextEdit *TextEdit `json:"textEdit,omitempty"`
+
+ // AdditionalTextEdits an optional array of additional [text edits](#TextEdit) that are applied when
+ // selecting this color presentation. Edits must not overlap with the main [edit](#ColorPresentation.textEdit) nor with themselves.
+ AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
+}
+
+// DocumentFormattingParams params of Document Formatting request.
+type DocumentFormattingParams struct {
+ WorkDoneProgressParams
+
+ // Options is the format options.
+ Options FormattingOptions `json:"options"`
+
+ // TextDocument is the document to format.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// FormattingOptions value-object describing what options formatting should use.
+type FormattingOptions struct {
+ // InsertSpaces prefer spaces over tabs.
+ InsertSpaces bool `json:"insertSpaces"`
+
+ // TabSize size of a tab in spaces.
+ TabSize uint32 `json:"tabSize"`
+
+ // TrimTrailingWhitespace trim trailing whitespace on a line.
+ //
+ // @since 3.15.0.
+ TrimTrailingWhitespace bool `json:"trimTrailingWhitespace,omitempty"`
+
+ // InsertFinalNewline inserts a newline character at the end of the file if one does not exist.
+ //
+ // @since 3.15.0.
+ InsertFinalNewline bool `json:"insertFinalNewline,omitempty"`
+
+ // TrimFinalNewlines trim all newlines after the final newline at the end of the file.
+ //
+ // @since 3.15.0.
+ TrimFinalNewlines bool `json:"trimFinalNewlines,omitempty"`
+
+ // Key is the signature for further properties.
+ Key map[string]interface{} `json:"key,omitempty"` // bool | int32 | string
+}
+
+// DocumentRangeFormattingParams params of Document Range Formatting request.
+type DocumentRangeFormattingParams struct {
+ WorkDoneProgressParams
+
+ // TextDocument is the document to format.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // Range is the range to format.
+ Range Range `json:"range"`
+
+ // Options is the format options.
+ Options FormattingOptions `json:"options"`
+}
+
+// DocumentOnTypeFormattingParams params of Document on Type Formatting request.
+type DocumentOnTypeFormattingParams struct {
+ // TextDocument is the document to format.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // Position is the position at which this request was sent.
+ Position Position `json:"position"`
+
+ // Ch is the character that has been typed.
+ Ch string `json:"ch"`
+
+ // Options is the format options.
+ Options FormattingOptions `json:"options"`
+}
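A formatter typically reduces `FormattingOptions` to the indent unit it will emit; a minimal sketch under that assumption:

package main

import (
    "fmt"
    "strings"

    "go.lsp.dev/protocol"
)

// indentUnit derives the indentation string from the client's options:
// TabSize spaces when InsertSpaces is set, otherwise a single tab.
func indentUnit(opts protocol.FormattingOptions) string {
    if opts.InsertSpaces {
        return strings.Repeat(" ", int(opts.TabSize))
    }
    return "\t"
}

func main() {
    fmt.Printf("%q\n", indentUnit(protocol.FormattingOptions{InsertSpaces: true, TabSize: 4})) // "    "
}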
+// DocumentOnTypeFormattingRegistrationOptions DocumentOnTypeFormatting Registration options.
+type DocumentOnTypeFormattingRegistrationOptions struct {
+ TextDocumentRegistrationOptions
+
+ // FirstTriggerCharacter a character on which formatting should be triggered, like `}`.
+ FirstTriggerCharacter string `json:"firstTriggerCharacter"`
+
+ // MoreTriggerCharacter more trigger characters.
+ MoreTriggerCharacter []string `json:"moreTriggerCharacter"`
+}
+
+// RenameParams params of Rename request.
+type RenameParams struct {
+ TextDocumentPositionParams
+ PartialResultParams
+
+ // NewName is the new name of the symbol. If the given name is not valid the
+ // request must return a [ResponseError](#ResponseError) with an
+ // appropriate message set.
+ NewName string `json:"newName"`
+}
+
+// RenameRegistrationOptions Rename Registration options.
+type RenameRegistrationOptions struct {
+ TextDocumentRegistrationOptions
+
+ // PrepareProvider indicates that renames should be checked and tested for validity before being executed.
+ PrepareProvider bool `json:"prepareProvider,omitempty"`
+}
+
+// PrepareRenameParams params of PrepareRename request.
+//
+// @since 3.15.0.
+type PrepareRenameParams struct {
+ TextDocumentPositionParams
+}
+
+// FoldingRangeParams params of Folding Range request.
+type FoldingRangeParams struct {
+ TextDocumentPositionParams
+ PartialResultParams
+}
+
+// FoldingRangeKind is the enum of known range kinds.
+type FoldingRangeKind string
+
+const (
+ // CommentFoldingRange is the folding range for a comment.
+ CommentFoldingRange FoldingRangeKind = "comment"
+
+ // ImportsFoldingRange is the folding range for imports or includes.
+ ImportsFoldingRange FoldingRangeKind = "imports"
+
+ // RegionFoldingRange is the folding range for a region (e.g. `#region`).
+ RegionFoldingRange FoldingRangeKind = "region"
+)
+
+// FoldingRange represents a folding range.
+//
+// @since 3.10.0.
+type FoldingRange struct {
+ // StartLine is the zero-based line number from where the folded range starts.
+ StartLine uint32 `json:"startLine"`
+
+ // StartCharacter is the zero-based character offset from where the folded range starts. If not defined, defaults to the length of the start line.
+ StartCharacter uint32 `json:"startCharacter,omitempty"`
+
+ // EndLine is the zero-based line number where the folded range ends.
+ EndLine uint32 `json:"endLine"`
+
+ // EndCharacter is the zero-based character offset before the folded range ends. If not defined, defaults to the length of the end line.
+ EndCharacter uint32 `json:"endCharacter,omitempty"`
+
+ // Kind describes the kind of the folding range such as 'comment' or 'region'. The kind
+ // is used to categorize folding ranges and used by commands like 'Fold all comments'.
+ // See FoldingRangeKind for an enumeration of standardized kinds.
+ Kind FoldingRangeKind `json:"kind,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/protocol/log.go b/vendor/go.lsp.dev/protocol/log.go
new file mode 100644
index 00000000000..0b7c9aafc69
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/log.go
@@ -0,0 +1,156 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "go.lsp.dev/jsonrpc2"
+)
+
+// loggingStream wraps a jsonrpc2.Stream and logs the traffic that passes through it.
+type loggingStream struct {
+ stream jsonrpc2.Stream
+ log io.Writer
+ logMu sync.Mutex
+}
+
+// LoggingStream returns a stream that does LSP protocol logging.
+func LoggingStream(stream jsonrpc2.Stream, w io.Writer) jsonrpc2.Stream { + return &loggingStream{ + stream: stream, + log: w, + } +} + +// Read implements jsonrpc2.Stream.Read. +func (s *loggingStream) Read(ctx context.Context) (jsonrpc2.Message, int64, error) { + msg, count, err := s.stream.Read(ctx) + if err == nil { + s.logCommon(msg, true) + } + + return msg, count, err +} + +// Write implements jsonrpc2.Stream.Write. +func (s *loggingStream) Write(ctx context.Context, msg jsonrpc2.Message) (int64, error) { + s.logCommon(msg, false) + count, err := s.stream.Write(ctx, msg) + + return count, err +} + +// Close implements jsonrpc2.Stream.Close. +func (s *loggingStream) Close() error { + return s.stream.Close() +} + +type req struct { + method string + start time.Time +} + +type mapped struct { + mu sync.Mutex + clientCalls map[string]req + serverCalls map[string]req +} + +var maps = &mapped{ + mu: sync.Mutex{}, + clientCalls: make(map[string]req), + serverCalls: make(map[string]req), +} + +// these 4 methods are each used exactly once, but it seemed +// better to have the encapsulation rather than ad hoc mutex +// code in 4 places. +func (m *mapped) client(id string) req { + m.mu.Lock() + v := m.clientCalls[id] + delete(m.clientCalls, id) + m.mu.Unlock() + + return v +} + +func (m *mapped) server(id string) req { + m.mu.Lock() + v := m.serverCalls[id] + delete(m.serverCalls, id) + m.mu.Unlock() + + return v +} + +func (m *mapped) setClient(id string, r req) { + m.mu.Lock() + m.clientCalls[id] = r + m.mu.Unlock() +} + +func (m *mapped) setServer(id string, r req) { + m.mu.Lock() + m.serverCalls[id] = r + m.mu.Unlock() +} + +const eor = "\r\n\r\n\r\n" + +func (s *loggingStream) logCommon(msg jsonrpc2.Message, isRead bool) { + if msg == nil || s.log == nil { + return + } + + s.logMu.Lock() + + direction, pastTense := "Received", "Received" + get, set := maps.client, maps.setServer + if isRead { + direction, pastTense = "Sending", "Sent" + get, set = maps.server, maps.setClient + } + + tm := time.Now() + tmfmt := tm.Format("15:04:05.000 PM") + + var buf bytes.Buffer + fmt.Fprintf(&buf, "[Trace - %s] ", tmfmt) // common beginning + + switch msg := msg.(type) { + case *jsonrpc2.Call: + id := fmt.Sprint(msg.ID()) + fmt.Fprintf(&buf, "%s request '%s - (%s)'.\n", direction, msg.Method(), id) + fmt.Fprintf(&buf, "Params: %s%s", msg.Params(), eor) + set(id, req{method: msg.Method(), start: tm}) + + case *jsonrpc2.Notification: + fmt.Fprintf(&buf, "%s notification '%s'.\n", direction, msg.Method()) + fmt.Fprintf(&buf, "Params: %s%s", msg.Params(), eor) + + case *jsonrpc2.Response: + id := fmt.Sprint(msg.ID()) + if err := msg.Err(); err != nil { + fmt.Fprintf(s.log, "[Error - %s] %s #%s %s%s", pastTense, tmfmt, id, err, eor) + + return + } + + cc := get(id) + elapsed := tm.Sub(cc.start) + fmt.Fprintf(&buf, "%s response '%s - (%s)' in %dms.\n", + direction, cc.method, id, elapsed/time.Millisecond) + fmt.Fprintf(&buf, "Result: %s%s", msg.Result(), eor) + } + + s.log.Write(buf.Bytes()) + + s.logMu.Unlock() +} diff --git a/vendor/go.lsp.dev/protocol/progress.go b/vendor/go.lsp.dev/protocol/progress.go new file mode 100644 index 00000000000..d1a2e9f6d7a --- /dev/null +++ b/vendor/go.lsp.dev/protocol/progress.go @@ -0,0 +1,119 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +// WorkDoneProgressKind kind of WorkDoneProgress. +// +// @since 3.15.0. +type WorkDoneProgressKind string + +// list of WorkDoneProgressKind. 
+const (
+ // WorkDoneProgressKindBegin kind of WorkDoneProgressBegin.
+ WorkDoneProgressKindBegin WorkDoneProgressKind = "begin"
+
+ // WorkDoneProgressKindReport kind of WorkDoneProgressReport.
+ WorkDoneProgressKindReport WorkDoneProgressKind = "report"
+
+ // WorkDoneProgressKindEnd kind of WorkDoneProgressEnd.
+ WorkDoneProgressKindEnd WorkDoneProgressKind = "end"
+)
+
+// WorkDoneProgressBegin is the payload used to start progress reporting in a "$/progress" notification.
+//
+// @since 3.15.0.
+type WorkDoneProgressBegin struct {
+ // Kind is the kind of WorkDoneProgressBegin.
+ //
+ // It must be WorkDoneProgressKindBegin.
+ Kind WorkDoneProgressKind `json:"kind"`
+
+ // Title mandatory title of the progress operation. Used to briefly inform about
+ // the kind of operation being performed.
+ //
+ // Examples: "Indexing" or "Linking dependencies".
+ Title string `json:"title"`
+
+ // Cancellable controls if a cancel button should show to allow the user to cancel the
+ // long running operation. Clients that don't support cancellation are allowed
+ // to ignore the setting.
+ Cancellable bool `json:"cancellable,omitempty"`
+
+ // Message is optional, more detailed associated progress message. Contains
+ // complementary information to the `title`.
+ //
+ // Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
+ // If unset, the previous progress message (if any) is still valid.
+ Message string `json:"message,omitempty"`
+
+ // Percentage is optional progress percentage to display (value 100 is considered 100%).
+ // If not provided infinite progress is assumed and clients are allowed
+ // to ignore the `percentage` value in subsequent report notifications.
+ //
+ // The value should be steadily rising. Clients are free to ignore values
+ // that are not following this rule.
+ Percentage uint32 `json:"percentage,omitempty"`
+}
+
+// WorkDoneProgressReport is the payload used to report progress in a "$/progress" notification.
+//
+// @since 3.15.0.
+type WorkDoneProgressReport struct {
+ // Kind is the kind of WorkDoneProgressReport.
+ //
+ // It must be WorkDoneProgressKindReport.
+ Kind WorkDoneProgressKind `json:"kind"`
+
+ // Cancellable controls enablement state of a cancel button.
+ //
+ // Clients that don't support cancellation or don't support controlling the button's
+ // enablement state are allowed to ignore the property.
+ Cancellable bool `json:"cancellable,omitempty"`
+
+ // Message is optional, more detailed associated progress message. Contains
+ // complementary information to the `title`.
+ //
+ // Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
+ // If unset, the previous progress message (if any) is still valid.
+ Message string `json:"message,omitempty"`
+
+ // Percentage is optional progress percentage to display (value 100 is considered 100%).
+ // If not provided infinite progress is assumed and clients are allowed
+ // to ignore the `percentage` value in subsequent report notifications.
+ //
+ // The value should be steadily rising. Clients are free to ignore values
+ // that are not following this rule.
+ Percentage uint32 `json:"percentage,omitempty"`
+}
+
+// WorkDoneProgressEnd is the payload used to signal the end of progress reporting in a "$/progress" notification.
+//
+// @since 3.15.0.
+type WorkDoneProgressEnd struct {
+ // Kind is the kind of WorkDoneProgressEnd.
+ //
+ // It must be WorkDoneProgressKindEnd.
+ Kind WorkDoneProgressKind `json:"kind"`
+
+ // Message is an optional final message indicating, for example, the outcome
+ // of the operation.
+ Message string `json:"message,omitempty"`
+}
+
+// WorkDoneProgressParams is a parameter property to report work done progress.
+//
+// @since 3.15.0.
+type WorkDoneProgressParams struct {
+ // WorkDoneToken an optional token that a server can use to report work done progress.
+ WorkDoneToken *ProgressToken `json:"workDoneToken,omitempty"`
+}
+
+// PartialResultParams is the parameter literal used to pass a partial result token.
+//
+// @since 3.15.0.
+type PartialResultParams struct {
+ // PartialResultToken an optional token that a server can use to report partial results
+ // (for example, streaming) to the client.
+ PartialResultToken *ProgressToken `json:"partialResultToken,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/protocol/protocol.go b/vendor/go.lsp.dev/protocol/protocol.go
new file mode 100644
index 00000000000..7d90b255573
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/protocol.go
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+ "context"
+
+ "go.uber.org/zap"
+
+ "go.lsp.dev/jsonrpc2"
+)
+
+// NewServer returns the context in which client is embedded, jsonrpc2.Conn, and the Client.
+func NewServer(ctx context.Context, server Server, stream jsonrpc2.Stream, logger *zap.Logger) (context.Context, jsonrpc2.Conn, Client) {
+ conn := jsonrpc2.NewConn(stream)
+ client := ClientDispatcher(conn, logger.Named("client"))
+ ctx = WithClient(ctx, client)
+
+ conn.Go(ctx,
+  Handlers(
+   ServerHandler(server, jsonrpc2.MethodNotFoundHandler),
+  ),
+ )
+
+ return ctx, conn, client
+}
+
+// NewClient returns the context in which Client is embedded, jsonrpc2.Conn, and the Server.
+func NewClient(ctx context.Context, client Client, stream jsonrpc2.Stream, logger *zap.Logger) (context.Context, jsonrpc2.Conn, Server) {
+ ctx = WithClient(ctx, client)
+
+ conn := jsonrpc2.NewConn(stream)
+ conn.Go(ctx,
+  Handlers(
+   ClientHandler(client, jsonrpc2.MethodNotFoundHandler),
+  ),
+ )
+ server := ServerDispatcher(conn, logger.Named("server"))
+
+ return ctx, conn, server
+}
diff --git a/vendor/go.lsp.dev/protocol/registration.go b/vendor/go.lsp.dev/protocol/registration.go
new file mode 100644
index 00000000000..a2abb43801a
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/registration.go
@@ -0,0 +1,44 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// Registration general parameters to register for a capability.
+type Registration struct {
+ // ID is the id used to register the request. The id can be used to deregister
+ // the request again.
+ ID string `json:"id"`
+
+ // Method is the method / capability to register for.
+ Method string `json:"method"`
+
+ // RegisterOptions options necessary for the registration.
+ RegisterOptions interface{} `json:"registerOptions,omitempty"`
+}
+
+// RegistrationParams params of Register Capability.
+type RegistrationParams struct {
+ Registrations []Registration `json:"registrations"`
+}
+
+// TextDocumentRegistrationOptions TextDocumentRegistration options.
+type TextDocumentRegistrationOptions struct {
+ // DocumentSelector a document selector to identify the scope of the registration. If set to null
+ // the document selector provided on the client side will be used.
+ DocumentSelector DocumentSelector `json:"documentSelector"`
+}
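Putting `NewServer` together with the `LoggingStream` wrapper from log.go, here is a hedged sketch of wiring a server over stdio; `impl` is assumed to satisfy this package's `Server` interface, and the `stdio` adapter is hypothetical glue, not part of the library:

package server

import (
    "context"
    "os"

    "go.uber.org/zap"

    "go.lsp.dev/jsonrpc2"
    "go.lsp.dev/protocol"
)

// stdio adapts stdin/stdout to the io.ReadWriteCloser that jsonrpc2.NewStream expects.
type stdio struct{}

func (stdio) Read(p []byte) (int, error)  { return os.Stdin.Read(p) }
func (stdio) Write(p []byte) (int, error) { return os.Stdout.Write(p) }
func (stdio) Close() error                { return nil }

// Run wires an LSP server implementation to a logged stdio stream and blocks
// until the connection shuts down.
func Run(ctx context.Context, impl protocol.Server) error {
    logger, err := zap.NewDevelopment()
    if err != nil {
        return err
    }
    // Wrap the raw stream so all LSP traffic is mirrored to stderr.
    stream := protocol.LoggingStream(jsonrpc2.NewStream(stdio{}), os.Stderr)
    _, conn, _ := protocol.NewServer(ctx, impl, stream, logger)
    <-conn.Done()
    return conn.Err()
}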
+// Unregistration general parameters to unregister a capability.
+type Unregistration struct {
+ // ID is the id used to unregister the request or notification. Usually an id
+ // provided during the register request.
+ ID string `json:"id"`
+
+ // Method is the method / capability to unregister for.
+ Method string `json:"method"`
+}
+
+// UnregistrationParams params of Unregistration.
+type UnregistrationParams struct {
+ Unregisterations []Unregistration `json:"unregisterations"`
+}
diff --git a/vendor/go.lsp.dev/protocol/selectionrange.go b/vendor/go.lsp.dev/protocol/selectionrange.go
new file mode 100644
index 00000000000..c4cd16913af
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/selectionrange.go
@@ -0,0 +1,110 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// SelectionRangeProviderOptions selection range provider options interface.
+type SelectionRangeProviderOptions interface{}
+
+// SelectionRange represents a part of a selection hierarchy.
+//
+// A selection range may have a parent selection range that contains it.
+//
+// @since 3.15.0.
+type SelectionRange struct {
+ // Range is the Range of this selection range.
+ Range Range `json:"range"`
+
+ // Parent is the parent selection range containing this range. Therefore `parent.range` must contain this Range.
+ Parent *SelectionRange `json:"parent,omitempty"`
+}
+
+// EnableSelectionRange is whether the selection range is enabled.
+type EnableSelectionRange bool
+
+// compile time check whether the EnableSelectionRange implements a SelectionRangeProviderOptions interface.
+var _ SelectionRangeProviderOptions = (*EnableSelectionRange)(nil)
+
+// Value implements SelectionRangeProviderOptions interface.
+func (v EnableSelectionRange) Value() interface{} {
+ return bool(v)
+}
+
+// NewEnableSelectionRange returns the new EnableSelectionRange underlying types SelectionRangeProviderOptions.
+func NewEnableSelectionRange(enable bool) SelectionRangeProviderOptions {
+ v := EnableSelectionRange(enable)
+
+ return &v
+}
+
+// SelectionRangeOptions is the server capability of selection range.
+type SelectionRangeOptions struct {
+ WorkDoneProgressOptions
+}
+
+// compile time check whether the SelectionRangeOptions implements a SelectionRangeProviderOptions interface.
+var _ SelectionRangeProviderOptions = (*SelectionRangeOptions)(nil)
+
+// Value implements SelectionRangeProviderOptions interface.
+func (v *SelectionRangeOptions) Value() interface{} {
+ return v
+}
+
+// NewSelectionRangeOptions returns the new SelectionRangeOptions underlying types SelectionRangeProviderOptions.
+func NewSelectionRangeOptions(enableWorkDoneProgress bool) SelectionRangeProviderOptions {
+ v := SelectionRangeOptions{
+  WorkDoneProgressOptions: WorkDoneProgressOptions{
+   WorkDoneProgress: enableWorkDoneProgress,
+  },
+ }
+
+ return &v
+}
+
+// SelectionRangeRegistrationOptions is the server capability of selection range registration.
+type SelectionRangeRegistrationOptions struct {
+ SelectionRangeOptions
+ TextDocumentRegistrationOptions
+ StaticRegistrationOptions
+}
+
+// compile time check whether the SelectionRangeRegistrationOptions implements a SelectionRangeProviderOptions interface.
+var _ SelectionRangeProviderOptions = (*SelectionRangeRegistrationOptions)(nil)
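The parent chain is what drives an editor's "expand selection" gesture; a small sketch walking one step up a hierarchy (the positions are made up):

package main

import (
    "fmt"

    "go.lsp.dev/protocol"
)

// expand moves one step up the selection hierarchy, or stays put at the root.
func expand(sr *protocol.SelectionRange) *protocol.SelectionRange {
    if sr != nil && sr.Parent != nil {
        return sr.Parent
    }
    return sr
}

func main() {
    // An identifier range nested inside an enclosing statement range.
    word := &protocol.SelectionRange{
        Range: protocol.Range{
            Start: protocol.Position{Line: 3, Character: 8},
            End:   protocol.Position{Line: 3, Character: 12},
        },
        Parent: &protocol.SelectionRange{
            Range: protocol.Range{
                Start: protocol.Position{Line: 3, Character: 0},
                End:   protocol.Position{Line: 5, Character: 1},
            },
        },
    }
    fmt.Printf("%+v\n", expand(word).Range) // the enclosing range
}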
+// Value implements SelectionRangeProviderOptions interface.
+func (v *SelectionRangeRegistrationOptions) Value() interface{} {
+ return v
+}
+
+// NewSelectionRangeRegistrationOptions returns the new SelectionRangeRegistrationOptions underlying types SelectionRangeProviderOptions.
+func NewSelectionRangeRegistrationOptions(enableWorkDoneProgress bool, selector DocumentSelector, id string) SelectionRangeProviderOptions {
+ v := SelectionRangeRegistrationOptions{
+  SelectionRangeOptions: SelectionRangeOptions{
+   WorkDoneProgressOptions: WorkDoneProgressOptions{
+    WorkDoneProgress: enableWorkDoneProgress,
+   },
+  },
+  TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
+   DocumentSelector: selector,
+  },
+  StaticRegistrationOptions: StaticRegistrationOptions{
+   ID: id,
+  },
+ }
+
+ return &v
+}
+
+// SelectionRangeParams represents a parameter literal used in selection range requests.
+//
+// @since 3.15.0.
+type SelectionRangeParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // Positions is the positions inside the text document.
+ Positions []Position `json:"positions"`
+}
diff --git a/vendor/go.lsp.dev/protocol/semantic_token.go b/vendor/go.lsp.dev/protocol/semantic_token.go
new file mode 100644
index 00000000000..c2d1f3a4db5
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/semantic_token.go
@@ -0,0 +1,179 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// SemanticTokenTypes represents a type of semantic token.
+//
+// @since 3.16.0.
+type SemanticTokenTypes string
+
+// list of SemanticTokenTypes.
+const (
+ SemanticTokenNamespace SemanticTokenTypes = "namespace"
+
+ // Represents a generic type. Acts as a fallback for types which
+ // can't be mapped to a specific type like class or enum.
+ SemanticTokenType SemanticTokenTypes = "type"
+ SemanticTokenClass SemanticTokenTypes = "class"
+ SemanticTokenEnum SemanticTokenTypes = "enum"
+ SemanticTokenInterface SemanticTokenTypes = "interface"
+ SemanticTokenStruct SemanticTokenTypes = "struct"
+ SemanticTokenTypeParameter SemanticTokenTypes = "typeParameter"
+ SemanticTokenParameter SemanticTokenTypes = "parameter"
+ SemanticTokenVariable SemanticTokenTypes = "variable"
+ SemanticTokenProperty SemanticTokenTypes = "property"
+ SemanticTokenEnumMember SemanticTokenTypes = "enumMember"
+ SemanticTokenEvent SemanticTokenTypes = "event"
+ SemanticTokenFunction SemanticTokenTypes = "function"
+ SemanticTokenMethod SemanticTokenTypes = "method"
+ SemanticTokenMacro SemanticTokenTypes = "macro"
+ SemanticTokenKeyword SemanticTokenTypes = "keyword"
+ SemanticTokenModifier SemanticTokenTypes = "modifier"
+ SemanticTokenComment SemanticTokenTypes = "comment"
+ SemanticTokenString SemanticTokenTypes = "string"
+ SemanticTokenNumber SemanticTokenTypes = "number"
+ SemanticTokenRegexp SemanticTokenTypes = "regexp"
+ SemanticTokenOperator SemanticTokenTypes = "operator"
+)
+
+// SemanticTokenModifiers represents a modifier of a semantic token.
+//
+// @since 3.16.0.
+type SemanticTokenModifiers string
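The string names above only appear in the legend; on the wire each token is five integers, relative-encoded. A sketch of that encoding follows (see the `SemanticTokensLegend` and `SemanticTokens.Data` types defined next):

package main

import "fmt"

// encodeToken produces the 5-integer group used in SemanticTokens.Data:
// {deltaLine, deltaStartChar, length, tokenType, tokenModifiers}.
// tokenType indexes the legend's TokenTypes; tokenModifiers is a bit set
// over the legend's TokenModifiers.
func encodeToken(prevLine, prevChar, line, char, length, typeIdx, modifierBits uint32) []uint32 {
    deltaLine := line - prevLine
    deltaChar := char
    if deltaLine == 0 {
        deltaChar = char - prevChar // same line: start is relative to the previous token
    }
    return []uint32{deltaLine, deltaChar, length, typeIdx, modifierBits}
}

func main() {
    // Token at line 2, col 4, length 3; previous token started at line 2, col 0.
    // Legend type index 1, modifier bit 0 set.
    fmt.Println(encodeToken(2, 0, 2, 4, 3, 1, 1<<0)) // [0 4 3 1 1]
}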
+// list of SemanticTokenModifiers.
+const (
+ SemanticTokenModifierDeclaration SemanticTokenModifiers = "declaration"
+ SemanticTokenModifierDefinition SemanticTokenModifiers = "definition"
+ SemanticTokenModifierReadonly SemanticTokenModifiers = "readonly"
+ SemanticTokenModifierStatic SemanticTokenModifiers = "static"
+ SemanticTokenModifierDeprecated SemanticTokenModifiers = "deprecated"
+ SemanticTokenModifierAbstract SemanticTokenModifiers = "abstract"
+ SemanticTokenModifierAsync SemanticTokenModifiers = "async"
+ SemanticTokenModifierModification SemanticTokenModifiers = "modification"
+ SemanticTokenModifierDocumentation SemanticTokenModifiers = "documentation"
+ SemanticTokenModifierDefaultLibrary SemanticTokenModifiers = "defaultLibrary"
+)
+
+// TokenFormat is an additional token format capability to allow future extensions of the format.
+//
+// @since 3.16.0.
+type TokenFormat string
+
+// TokenFormatRelative means tokens are described using relative positions.
+const TokenFormatRelative TokenFormat = "relative"
+
+// SemanticTokensLegend describes how token types and modifiers are encoded.
+//
+// On the capability level, types and modifiers are defined using strings.
+// However, the real encoding happens using numbers.
+//
+// The server therefore needs to let the client know which numbers it is using for which types and modifiers.
+//
+// @since 3.16.0.
+type SemanticTokensLegend struct {
+ // TokenTypes is the token types a server uses.
+ TokenTypes []SemanticTokenTypes `json:"tokenTypes"`
+
+ // TokenModifiers is the token modifiers a server uses.
+ TokenModifiers []SemanticTokenModifiers `json:"tokenModifiers"`
+}
+
+// SemanticTokensParams params for the SemanticTokensFull request.
+//
+// @since 3.16.0.
+type SemanticTokensParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// SemanticTokens is the result of SemanticTokensFull request.
+//
+// @since 3.16.0.
+type SemanticTokens struct {
+ // ResultID an optional result id. If provided and clients support delta updating
+ // the client will include the result id in the next semantic token request.
+ //
+ // A server can then instead of computing all semantic tokens again simply
+ // send a delta.
+ ResultID string `json:"resultId,omitempty"`
+
+ // Data is the actual tokens.
+ Data []uint32 `json:"data"`
+}
+
+// SemanticTokensPartialResult is the partial result of SemanticTokensFull request.
+//
+// @since 3.16.0.
+type SemanticTokensPartialResult struct {
+ // Data is the actual tokens.
+ Data []uint32 `json:"data"`
+}
+
+// SemanticTokensDeltaParams params for the SemanticTokensFullDelta request.
+//
+// @since 3.16.0.
+type SemanticTokensDeltaParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // PreviousResultID is the result id of a previous response.
+ //
+ // The result Id can either point to a full response or a delta response depending on what was received last.
+ PreviousResultID string `json:"previousResultId"`
+}
+
+// SemanticTokensDelta result of SemanticTokensFullDelta request.
+//
+// @since 3.16.0.
+type SemanticTokensDelta struct {
+ // ResultID is the result id.
+ //
+ // This field is readonly.
+ ResultID string `json:"resultId,omitempty"`
+
+ // Edits is the semantic token edits to transform a previous result into a new
+ // result.
+ Edits []SemanticTokensEdit `json:"edits"`
+}
+
+// SemanticTokensDeltaPartialResult is the partial result of SemanticTokensFullDelta request.
+//
+// @since 3.16.0.
+type SemanticTokensDeltaPartialResult struct {
+ Edits []SemanticTokensEdit `json:"edits"`
+}
+
+// SemanticTokensEdit is the semantic token edit.
+//
+// @since 3.16.0.
+type SemanticTokensEdit struct {
+ // Start is the start offset of the edit.
+ Start uint32 `json:"start"`
+
+ // DeleteCount is the count of elements to remove.
+ DeleteCount uint32 `json:"deleteCount"`
+
+ // Data is the elements to insert.
+ Data []uint32 `json:"data,omitempty"`
+}
+
+// SemanticTokensRangeParams params for the SemanticTokensRange request.
+//
+// @since 3.16.0.
+type SemanticTokensRangeParams struct {
+ WorkDoneProgressParams
+ PartialResultParams
+
+ // TextDocument is the text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // Range is the range the semantic tokens are requested for.
+ Range Range `json:"range"`
+}
diff --git a/vendor/go.lsp.dev/protocol/server.go b/vendor/go.lsp.dev/protocol/server.go
new file mode 100644
index 00000000000..6f96161ccb2
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/server.go
@@ -0,0 +1,1892 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ "github.com/segmentio/encoding/json"
+ "go.uber.org/zap"
+
+ "go.lsp.dev/jsonrpc2"
+ "go.lsp.dev/pkg/xcontext"
+)
+
+// ServerDispatcher returns a Server that dispatches LSP requests across the
+// given jsonrpc2 connection.
+func ServerDispatcher(conn jsonrpc2.Conn, logger *zap.Logger) Server {
+ return &server{
+  Conn: conn,
+  logger: logger,
+ }
+}
+
+// ServerHandler jsonrpc2.Handler of Language Server Protocol Server.
+//nolint:unparam
+func ServerHandler(server Server, handler jsonrpc2.Handler) jsonrpc2.Handler {
+ h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+  if ctx.Err() != nil {
+   xctx := xcontext.Detach(ctx)
+
+   return reply(xctx, nil, ErrRequestCancelled)
+  }
+  handled, err := serverDispatch(ctx, server, reply, req)
+  if handled || err != nil {
+   return err
+  }
+
+  // TODO: This code is wrong, it ignores handler and assumes non standard
+  // request handles everything
+  // non standard request should just be a layered handler.
+  var params interface{}
+  if err := json.Unmarshal(req.Params(), &params); err != nil {
+   return replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.Request(ctx, req.Method(), params)
+
+  return reply(ctx, resp, err)
+ }
+
+ return h
+}
+
+// serverDispatch implements jsonrpc2.Handler.
+//nolint:gocognit,funlen,gocyclo,cyclop
+func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, req jsonrpc2.Request) (handled bool, err error) {
+ if ctx.Err() != nil {
+  return true, reply(ctx, nil, ErrRequestCancelled)
+ }
+
+ dec := json.NewDecoder(bytes.NewReader(req.Params()))
+ logger := LoggerFromContext(ctx)
+
+ switch req.Method() {
+ case MethodInitialize: // request
+  defer logger.Debug(MethodInitialize, zap.Error(err))
+
+  var params InitializeParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.Initialize(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodInitialized: // notification
+  defer logger.Debug(MethodInitialized, zap.Error(err))
+
+  var params InitializedParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.Initialized(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodShutdown: // request
+  defer logger.Debug(MethodShutdown, zap.Error(err))
+
+  if len(req.Params()) > 0 {
+   return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+  }
+
+  err := server.Shutdown(ctx)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodExit: // notification
+  defer logger.Debug(MethodExit, zap.Error(err))
+
+  if len(req.Params()) > 0 {
+   return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+  }
+
+  err := server.Exit(ctx)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodWorkDoneProgressCancel: // notification
+  defer logger.Debug(MethodWorkDoneProgressCancel, zap.Error(err))
+
+  var params WorkDoneProgressCancelParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.WorkDoneProgressCancel(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodLogTrace: // notification
+  defer logger.Debug(MethodLogTrace, zap.Error(err))
+
+  var params LogTraceParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.LogTrace(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodSetTrace: // notification
+  defer logger.Debug(MethodSetTrace, zap.Error(err))
+
+  var params SetTraceParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.SetTrace(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodTextDocumentCodeAction: // request
+  defer logger.Debug(MethodTextDocumentCodeAction, zap.Error(err))
+
+  var params CodeActionParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.CodeAction(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodTextDocumentCodeLens: // request
+  defer logger.Debug(MethodTextDocumentCodeLens, zap.Error(err))
+
+  var params CodeLensParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.CodeLens(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodCodeLensResolve: // request
+  defer logger.Debug(MethodCodeLensResolve, zap.Error(err))
+
+  var params CodeLens
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.CodeLensResolve(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodTextDocumentColorPresentation: // request
+  defer logger.Debug(MethodTextDocumentColorPresentation, zap.Error(err))
+
+  var params ColorPresentationParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.ColorPresentation(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodTextDocumentCompletion: // request
+  defer logger.Debug(MethodTextDocumentCompletion, zap.Error(err))
+
+  var params CompletionParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.Completion(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodCompletionItemResolve: // request
+  defer logger.Debug(MethodCompletionItemResolve, zap.Error(err))
+
+  var params CompletionItem
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.CompletionResolve(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodTextDocumentDeclaration: // request
+  defer logger.Debug(MethodTextDocumentDeclaration, zap.Error(err))
+
+  var params DeclarationParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.Declaration(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodTextDocumentDefinition: // request
+  defer logger.Debug(MethodTextDocumentDefinition, zap.Error(err))
+
+  var params DefinitionParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  resp, err := server.Definition(ctx, &params)
+
+  return true, reply(ctx, resp, err)
+
+ case MethodTextDocumentDidChange: // notification
+  defer logger.Debug(MethodTextDocumentDidChange, zap.Error(err))
+
+  var params DidChangeTextDocumentParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.DidChange(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodWorkspaceDidChangeConfiguration: // notification
+  defer logger.Debug(MethodWorkspaceDidChangeConfiguration, zap.Error(err))
+
+  var params DidChangeConfigurationParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.DidChangeConfiguration(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodWorkspaceDidChangeWatchedFiles: // notification
+  defer logger.Debug(MethodWorkspaceDidChangeWatchedFiles, zap.Error(err))
+
+  var params DidChangeWatchedFilesParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.DidChangeWatchedFiles(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodWorkspaceDidChangeWorkspaceFolders: // notification
+  defer logger.Debug(MethodWorkspaceDidChangeWorkspaceFolders, zap.Error(err))
+
+  var params DidChangeWorkspaceFoldersParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.DidChangeWorkspaceFolders(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodTextDocumentDidClose: // notification
+  defer logger.Debug(MethodTextDocumentDidClose, zap.Error(err))
+
+  var params DidCloseTextDocumentParams
+  if err := dec.Decode(&params); err != nil {
+   return true, replyParseError(ctx, reply, err)
+  }
+
+  err := server.DidClose(ctx, &params)
+
+  return true, reply(ctx, nil, err)
+
+ case MethodTextDocumentDidOpen: // notification
+  defer logger.Debug(MethodTextDocumentDidOpen, zap.Error(err))
+
+  var params DidOpenTextDocumentParams
dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + err := server.DidOpen(ctx, ¶ms) + + return true, reply(ctx, nil, err) + + case MethodTextDocumentDidSave: // notification + defer logger.Debug(MethodTextDocumentDidSave, zap.Error(err)) + + var params DidSaveTextDocumentParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + err := server.DidSave(ctx, ¶ms) + + return true, reply(ctx, nil, err) + + case MethodTextDocumentDocumentColor: // request + defer logger.Debug(MethodTextDocumentDocumentColor, zap.Error(err)) + + var params DocumentColorParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.DocumentColor(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodTextDocumentDocumentHighlight: // request + defer logger.Debug(MethodTextDocumentDocumentHighlight, zap.Error(err)) + + var params DocumentHighlightParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.DocumentHighlight(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodTextDocumentDocumentLink: // request + defer logger.Debug(MethodTextDocumentDocumentLink, zap.Error(err)) + + var params DocumentLinkParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.DocumentLink(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodDocumentLinkResolve: // request + defer logger.Debug(MethodDocumentLinkResolve, zap.Error(err)) + + var params DocumentLink + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.DocumentLinkResolve(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodTextDocumentDocumentSymbol: // request + defer logger.Debug(MethodTextDocumentDocumentSymbol, zap.Error(err)) + + var params DocumentSymbolParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.DocumentSymbol(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodWorkspaceExecuteCommand: // request + defer logger.Debug(MethodWorkspaceExecuteCommand, zap.Error(err)) + + var params ExecuteCommandParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.ExecuteCommand(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodTextDocumentFoldingRange: // request + defer logger.Debug(MethodTextDocumentFoldingRange, zap.Error(err)) + + var params FoldingRangeParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.FoldingRanges(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodTextDocumentFormatting: // request + defer logger.Debug(MethodTextDocumentFormatting, zap.Error(err)) + + var params DocumentFormattingParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.Formatting(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodTextDocumentHover: // request + defer logger.Debug(MethodTextDocumentHover, zap.Error(err)) + + var params HoverParams + if err := dec.Decode(¶ms); err != nil { + return true, replyParseError(ctx, reply, err) + } + + resp, err := server.Hover(ctx, ¶ms) + + return true, reply(ctx, resp, err) + + case MethodTextDocumentImplementation: // request + 
+        defer logger.Debug(MethodTextDocumentImplementation, zap.Error(err))
+
+        var params ImplementationParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.Implementation(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentOnTypeFormatting: // request
+        defer logger.Debug(MethodTextDocumentOnTypeFormatting, zap.Error(err))
+
+        var params DocumentOnTypeFormattingParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.OnTypeFormatting(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentPrepareRename: // request
+        defer logger.Debug(MethodTextDocumentPrepareRename, zap.Error(err))
+
+        var params PrepareRenameParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.PrepareRename(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentRangeFormatting: // request
+        defer logger.Debug(MethodTextDocumentRangeFormatting, zap.Error(err))
+
+        var params DocumentRangeFormattingParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.RangeFormatting(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentReferences: // request
+        defer logger.Debug(MethodTextDocumentReferences, zap.Error(err))
+
+        var params ReferenceParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.References(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentRename: // request
+        defer logger.Debug(MethodTextDocumentRename, zap.Error(err))
+
+        var params RenameParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.Rename(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentSignatureHelp: // request
+        defer logger.Debug(MethodTextDocumentSignatureHelp, zap.Error(err))
+
+        var params SignatureHelpParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.SignatureHelp(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodWorkspaceSymbol: // request
+        defer logger.Debug(MethodWorkspaceSymbol, zap.Error(err))
+
+        var params WorkspaceSymbolParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.Symbols(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentTypeDefinition: // request
+        defer logger.Debug(MethodTextDocumentTypeDefinition, zap.Error(err))
+
+        var params TypeDefinitionParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.TypeDefinition(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodTextDocumentWillSave: // notification
+        defer logger.Debug(MethodTextDocumentWillSave, zap.Error(err))
+
+        var params WillSaveTextDocumentParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        err := server.WillSave(ctx, &params)
+
+        return true, reply(ctx, nil, err)
+
+    case MethodTextDocumentWillSaveWaitUntil: // request
+        defer logger.Debug(MethodTextDocumentWillSaveWaitUntil, zap.Error(err))
+
+        var params WillSaveTextDocumentParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.WillSaveWaitUntil(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodShowDocument: // request
+        defer logger.Debug(MethodShowDocument, zap.Error(err))
+
+        var params ShowDocumentParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.ShowDocument(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodWillCreateFiles: // request
+        defer logger.Debug(MethodWillCreateFiles, zap.Error(err))
+
+        var params CreateFilesParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.WillCreateFiles(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodDidCreateFiles: // notification
+        defer logger.Debug(MethodDidCreateFiles, zap.Error(err))
+
+        var params CreateFilesParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        err := server.DidCreateFiles(ctx, &params)
+
+        return true, reply(ctx, nil, err)
+
+    case MethodWillRenameFiles: // request
+        defer logger.Debug(MethodWillRenameFiles, zap.Error(err))
+
+        var params RenameFilesParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.WillRenameFiles(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodDidRenameFiles: // notification
+        defer logger.Debug(MethodDidRenameFiles, zap.Error(err))
+
+        var params RenameFilesParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        err := server.DidRenameFiles(ctx, &params)
+
+        return true, reply(ctx, nil, err)
+
+    case MethodWillDeleteFiles: // request
+        defer logger.Debug(MethodWillDeleteFiles, zap.Error(err))
+
+        var params DeleteFilesParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.WillDeleteFiles(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodDidDeleteFiles: // notification
+        defer logger.Debug(MethodDidDeleteFiles, zap.Error(err))
+
+        var params DeleteFilesParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        err := server.DidDeleteFiles(ctx, &params)
+
+        return true, reply(ctx, nil, err)
+
+    case MethodCodeLensRefresh: // request
+        defer logger.Debug(MethodCodeLensRefresh, zap.Error(err))
+
+        if len(req.Params()) > 0 {
+            return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+        }
+
+        err := server.CodeLensRefresh(ctx)
+
+        return true, reply(ctx, nil, err)
+
+    case MethodTextDocumentPrepareCallHierarchy: // request
+        defer logger.Debug(MethodTextDocumentPrepareCallHierarchy, zap.Error(err))
+
+        var params CallHierarchyPrepareParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.PrepareCallHierarchy(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodCallHierarchyIncomingCalls: // request
+        defer logger.Debug(MethodCallHierarchyIncomingCalls, zap.Error(err))
+
+        var params CallHierarchyIncomingCallsParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.IncomingCalls(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodCallHierarchyOutgoingCalls: // request
+        defer logger.Debug(MethodCallHierarchyOutgoingCalls, zap.Error(err))
+
+        var params CallHierarchyOutgoingCallsParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.OutgoingCalls(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodSemanticTokensFull: // request
+        defer logger.Debug(MethodSemanticTokensFull, zap.Error(err))
+
+        var params SemanticTokensParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.SemanticTokensFull(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodSemanticTokensFullDelta: // request
+        defer logger.Debug(MethodSemanticTokensFullDelta, zap.Error(err))
+
+        var params SemanticTokensDeltaParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.SemanticTokensFullDelta(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodSemanticTokensRange: // request
+        defer logger.Debug(MethodSemanticTokensRange, zap.Error(err))
+
+        var params SemanticTokensRangeParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.SemanticTokensRange(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodSemanticTokensRefresh: // request
+        defer logger.Debug(MethodSemanticTokensRefresh, zap.Error(err))
+
+        if len(req.Params()) > 0 {
+            return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+        }
+
+        err := server.SemanticTokensRefresh(ctx)
+
+        return true, reply(ctx, nil, err)
+
+    case MethodLinkedEditingRange: // request
+        defer logger.Debug(MethodLinkedEditingRange, zap.Error(err))
+
+        var params LinkedEditingRangeParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.LinkedEditingRange(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    case MethodMoniker: // request
+        defer logger.Debug(MethodMoniker, zap.Error(err))
+
+        var params MonikerParams
+        if err := dec.Decode(&params); err != nil {
+            return true, replyParseError(ctx, reply, err)
+        }
+
+        resp, err := server.Moniker(ctx, &params)
+
+        return true, reply(ctx, resp, err)
+
+    default:
+        return false, nil
+    }
+}
+
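Every case in the dispatch above follows the same decode -> call -> reply shape. As a hedged, standalone sketch of that pattern for one method (the function name and wiring are illustrative assumptions, not part of the vendored file; `jsonrpc2.Replier` and `replyParseError` are the same identifiers the generated switch uses):

// handleHover is a hypothetical standalone version of one dispatch case.
func handleHover(ctx context.Context, server Server, reply jsonrpc2.Replier, dec interface{ Decode(interface{}) error }, logger *zap.Logger) (handled bool, err error) {
    defer logger.Debug(MethodTextDocumentHover, zap.Error(err))

    var params HoverParams
    if err := dec.Decode(&params); err != nil {
        // malformed params: report a parse error, but mark the method handled
        return true, replyParseError(ctx, reply, err)
    }

    resp, err := server.Hover(ctx, &params)

    // reply forwards either the result or the handler error to the client
    return true, reply(ctx, resp, err)
}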
+// Server represents a Language Server Protocol server.
+type Server interface {
+    Initialize(ctx context.Context, params *InitializeParams) (result *InitializeResult, err error)
+    Initialized(ctx context.Context, params *InitializedParams) (err error)
+    Shutdown(ctx context.Context) (err error)
+    Exit(ctx context.Context) (err error)
+    WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) (err error)
+    LogTrace(ctx context.Context, params *LogTraceParams) (err error)
+    SetTrace(ctx context.Context, params *SetTraceParams) (err error)
+    CodeAction(ctx context.Context, params *CodeActionParams) (result []CodeAction, err error)
+    CodeLens(ctx context.Context, params *CodeLensParams) (result []CodeLens, err error)
+    CodeLensResolve(ctx context.Context, params *CodeLens) (result *CodeLens, err error)
+    ColorPresentation(ctx context.Context, params *ColorPresentationParams) (result []ColorPresentation, err error)
+    Completion(ctx context.Context, params *CompletionParams) (result *CompletionList, err error)
+    CompletionResolve(ctx context.Context, params *CompletionItem) (result *CompletionItem, err error)
+    Declaration(ctx context.Context, params *DeclarationParams) (result []Location /* Declaration | DeclarationLink[] | null */, err error)
+    Definition(ctx context.Context, params *DefinitionParams) (result []Location /* Definition | DefinitionLink[] | null */, err error)
+    DidChange(ctx context.Context, params *DidChangeTextDocumentParams) (err error)
+    DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) (err error)
+    DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) (err error)
+    DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) (err error)
+    DidClose(ctx context.Context, params *DidCloseTextDocumentParams) (err error)
+    DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) (err error)
+    DidSave(ctx context.Context, params *DidSaveTextDocumentParams) (err error)
+    DocumentColor(ctx context.Context, params *DocumentColorParams) (result []ColorInformation, err error)
+    DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) (result []DocumentHighlight, err error)
+    DocumentLink(ctx context.Context, params *DocumentLinkParams) (result []DocumentLink, err error)
+    DocumentLinkResolve(ctx context.Context, params *DocumentLink) (result *DocumentLink, err error)
+    DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) (result []interface{} /* []SymbolInformation | []DocumentSymbol */, err error)
+    ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (result interface{}, err error)
+    FoldingRanges(ctx context.Context, params *FoldingRangeParams) (result []FoldingRange, err error)
+    Formatting(ctx context.Context, params *DocumentFormattingParams) (result []TextEdit, err error)
+    Hover(ctx context.Context, params *HoverParams) (result *Hover, err error)
+    Implementation(ctx context.Context, params *ImplementationParams) (result []Location, err error)
+    OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) (result []TextEdit, err error)
+    PrepareRename(ctx context.Context, params *PrepareRenameParams) (result *Range, err error)
+    RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) (result []TextEdit, err error)
+    References(ctx context.Context, params *ReferenceParams) (result []Location, err error)
+    Rename(ctx context.Context, params *RenameParams) (result *WorkspaceEdit, err error)
+    SignatureHelp(ctx context.Context, params *SignatureHelpParams) (result *SignatureHelp, err error)
+    Symbols(ctx context.Context, params *WorkspaceSymbolParams) (result []SymbolInformation, err error)
+    TypeDefinition(ctx context.Context, params *TypeDefinitionParams) (result []Location, err error)
+    WillSave(ctx context.Context, params *WillSaveTextDocumentParams) (err error)
+    WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) (result []TextEdit, err error)
+    ShowDocument(ctx context.Context, params *ShowDocumentParams) (result *ShowDocumentResult, err error)
+    WillCreateFiles(ctx context.Context, params *CreateFilesParams) (result *WorkspaceEdit, err error)
+    DidCreateFiles(ctx context.Context, params *CreateFilesParams) (err error)
+    WillRenameFiles(ctx context.Context, params *RenameFilesParams) (result *WorkspaceEdit, err error)
+    DidRenameFiles(ctx context.Context, params *RenameFilesParams) (err error)
+    WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (result *WorkspaceEdit, err error)
+    DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) (err error)
+    CodeLensRefresh(ctx context.Context) (err error)
+    PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) (result []CallHierarchyItem, err error)
+    IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) (result []CallHierarchyIncomingCall, err error)
+    OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) (result []CallHierarchyOutgoingCall, err error)
+    SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (result *SemanticTokens, err error)
+    SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (result interface{} /* SemanticTokens | SemanticTokensDelta */, err error)
+    SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (result *SemanticTokens, err error)
+    SemanticTokensRefresh(ctx context.Context) (err error)
+    LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (result *LinkedEditingRanges, err error)
+    Moniker(ctx context.Context, params *MonikerParams) (result []Moniker, err error)
+    Request(ctx context.Context, method string, params interface{}) (result interface{}, err error)
+}
+
+// list of server methods.
+const (
+    // MethodCancelRequest method name of "$/cancelRequest".
+    MethodCancelRequest = "$/cancelRequest"
+
+    // MethodInitialize method name of "initialize".
+    MethodInitialize = "initialize"
+
+    // MethodInitialized method name of "initialized".
+    MethodInitialized = "initialized"
+
+    // MethodShutdown method name of "shutdown".
+    MethodShutdown = "shutdown"
+
+    // MethodExit method name of "exit".
+    MethodExit = "exit"
+
+    // MethodWorkDoneProgressCancel method name of "window/workDoneProgress/cancel".
+    MethodWorkDoneProgressCancel = "window/workDoneProgress/cancel"
+
+    // MethodLogTrace method name of "$/logTrace".
+    MethodLogTrace = "$/logTrace"
+
+    // MethodSetTrace method name of "$/setTrace".
+    MethodSetTrace = "$/setTrace"
+
+    // MethodTextDocumentCodeAction method name of "textDocument/codeAction".
+    MethodTextDocumentCodeAction = "textDocument/codeAction"
+
+    // MethodTextDocumentCodeLens method name of "textDocument/codeLens".
+    MethodTextDocumentCodeLens = "textDocument/codeLens"
+
+    // MethodCodeLensResolve method name of "codeLens/resolve".
+    MethodCodeLensResolve = "codeLens/resolve"
+
+    // MethodTextDocumentColorPresentation method name of "textDocument/colorPresentation".
+    MethodTextDocumentColorPresentation = "textDocument/colorPresentation"
+
+    // MethodTextDocumentCompletion method name of "textDocument/completion".
+    MethodTextDocumentCompletion = "textDocument/completion"
+
+    // MethodCompletionItemResolve method name of "completionItem/resolve".
+    MethodCompletionItemResolve = "completionItem/resolve"
+
+    // MethodTextDocumentDeclaration method name of "textDocument/declaration".
+    MethodTextDocumentDeclaration = "textDocument/declaration"
+
+    // MethodTextDocumentDefinition method name of "textDocument/definition".
+    MethodTextDocumentDefinition = "textDocument/definition"
+
+    // MethodTextDocumentDidChange method name of "textDocument/didChange".
+    MethodTextDocumentDidChange = "textDocument/didChange"
+
+    // MethodWorkspaceDidChangeConfiguration method name of "workspace/didChangeConfiguration".
+    MethodWorkspaceDidChangeConfiguration = "workspace/didChangeConfiguration"
+
+    // MethodWorkspaceDidChangeWatchedFiles method name of "workspace/didChangeWatchedFiles".
+    MethodWorkspaceDidChangeWatchedFiles = "workspace/didChangeWatchedFiles"
+
+    // MethodWorkspaceDidChangeWorkspaceFolders method name of "workspace/didChangeWorkspaceFolders".
+    MethodWorkspaceDidChangeWorkspaceFolders = "workspace/didChangeWorkspaceFolders"
+
+    // MethodTextDocumentDidClose method name of "textDocument/didClose".
+    MethodTextDocumentDidClose = "textDocument/didClose"
+
+    // MethodTextDocumentDidOpen method name of "textDocument/didOpen".
+    MethodTextDocumentDidOpen = "textDocument/didOpen"
+
+    // MethodTextDocumentDidSave method name of "textDocument/didSave".
+    MethodTextDocumentDidSave = "textDocument/didSave"
+
+    // MethodTextDocumentDocumentColor method name of "textDocument/documentColor".
+    MethodTextDocumentDocumentColor = "textDocument/documentColor"
+
+    // MethodTextDocumentDocumentHighlight method name of "textDocument/documentHighlight".
+    MethodTextDocumentDocumentHighlight = "textDocument/documentHighlight"
+
+    // MethodTextDocumentDocumentLink method name of "textDocument/documentLink".
+    MethodTextDocumentDocumentLink = "textDocument/documentLink"
+
+    // MethodDocumentLinkResolve method name of "documentLink/resolve".
+    MethodDocumentLinkResolve = "documentLink/resolve"
+
+    // MethodTextDocumentDocumentSymbol method name of "textDocument/documentSymbol".
+    MethodTextDocumentDocumentSymbol = "textDocument/documentSymbol"
+
+    // MethodWorkspaceExecuteCommand method name of "workspace/executeCommand".
+    MethodWorkspaceExecuteCommand = "workspace/executeCommand"
+
+    // MethodTextDocumentFoldingRange method name of "textDocument/foldingRange".
+    MethodTextDocumentFoldingRange = "textDocument/foldingRange"
+
+    // MethodTextDocumentFormatting method name of "textDocument/formatting".
+    MethodTextDocumentFormatting = "textDocument/formatting"
+
+    // MethodTextDocumentHover method name of "textDocument/hover".
+    MethodTextDocumentHover = "textDocument/hover"
+
+    // MethodTextDocumentImplementation method name of "textDocument/implementation".
+    MethodTextDocumentImplementation = "textDocument/implementation"
+
+    // MethodTextDocumentOnTypeFormatting method name of "textDocument/onTypeFormatting".
+    MethodTextDocumentOnTypeFormatting = "textDocument/onTypeFormatting"
+
+    // MethodTextDocumentPrepareRename method name of "textDocument/prepareRename".
+    MethodTextDocumentPrepareRename = "textDocument/prepareRename"
+
+    // MethodTextDocumentRangeFormatting method name of "textDocument/rangeFormatting".
+    MethodTextDocumentRangeFormatting = "textDocument/rangeFormatting"
+
+    // MethodTextDocumentReferences method name of "textDocument/references".
+    MethodTextDocumentReferences = "textDocument/references"
+
+    // MethodTextDocumentRename method name of "textDocument/rename".
+    MethodTextDocumentRename = "textDocument/rename"
+
+    // MethodTextDocumentSignatureHelp method name of "textDocument/signatureHelp".
+    MethodTextDocumentSignatureHelp = "textDocument/signatureHelp"
+
+    // MethodWorkspaceSymbol method name of "workspace/symbol".
+    MethodWorkspaceSymbol = "workspace/symbol"
+
+    // MethodTextDocumentTypeDefinition method name of "textDocument/typeDefinition".
+    MethodTextDocumentTypeDefinition = "textDocument/typeDefinition"
+
+    // MethodTextDocumentWillSave method name of "textDocument/willSave".
+    MethodTextDocumentWillSave = "textDocument/willSave"
+
+    // MethodTextDocumentWillSaveWaitUntil method name of "textDocument/willSaveWaitUntil".
+    MethodTextDocumentWillSaveWaitUntil = "textDocument/willSaveWaitUntil"
+
+    // MethodShowDocument method name of "window/showDocument".
+    MethodShowDocument = "window/showDocument"
+
+    // MethodWillCreateFiles method name of "workspace/willCreateFiles".
+    MethodWillCreateFiles = "workspace/willCreateFiles"
+
+    // MethodDidCreateFiles method name of "workspace/didCreateFiles".
+    MethodDidCreateFiles = "workspace/didCreateFiles"
+
+    // MethodWillRenameFiles method name of "workspace/willRenameFiles".
+    MethodWillRenameFiles = "workspace/willRenameFiles"
+
+    // MethodDidRenameFiles method name of "workspace/didRenameFiles".
+    MethodDidRenameFiles = "workspace/didRenameFiles"
+
+    // MethodWillDeleteFiles method name of "workspace/willDeleteFiles".
+    MethodWillDeleteFiles = "workspace/willDeleteFiles"
+
+    // MethodDidDeleteFiles method name of "workspace/didDeleteFiles".
+    MethodDidDeleteFiles = "workspace/didDeleteFiles"
+
+    // MethodCodeLensRefresh method name of "workspace/codeLens/refresh".
+    MethodCodeLensRefresh = "workspace/codeLens/refresh"
+
+    // MethodTextDocumentPrepareCallHierarchy method name of "textDocument/prepareCallHierarchy".
+    MethodTextDocumentPrepareCallHierarchy = "textDocument/prepareCallHierarchy"
+
+    // MethodCallHierarchyIncomingCalls method name of "callHierarchy/incomingCalls".
+    MethodCallHierarchyIncomingCalls = "callHierarchy/incomingCalls"
+
+    // MethodCallHierarchyOutgoingCalls method name of "callHierarchy/outgoingCalls".
+    MethodCallHierarchyOutgoingCalls = "callHierarchy/outgoingCalls"
+
+    // MethodSemanticTokensFull method name of "textDocument/semanticTokens/full".
+    MethodSemanticTokensFull = "textDocument/semanticTokens/full"
+
+    // MethodSemanticTokensFullDelta method name of "textDocument/semanticTokens/full/delta".
+    MethodSemanticTokensFullDelta = "textDocument/semanticTokens/full/delta"
+
+    // MethodSemanticTokensRange method name of "textDocument/semanticTokens/range".
+    MethodSemanticTokensRange = "textDocument/semanticTokens/range"
+
+    // MethodSemanticTokensRefresh method name of "workspace/semanticTokens/refresh".
+    MethodSemanticTokensRefresh = "workspace/semanticTokens/refresh"
+
+    // MethodLinkedEditingRange method name of "textDocument/linkedEditingRange".
+    MethodLinkedEditingRange = "textDocument/linkedEditingRange"
+
+    // MethodMoniker method name of "textDocument/moniker".
+    MethodMoniker = "textDocument/moniker"
+)
+
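These constants are plain strings, so they can also drive routing or filtering outside the generated dispatch. A small hedged sketch (the helper name is an invention for illustration; the constants are the ones defined above):

// isLifecycleMethod reports whether a JSON-RPC method name is one of the
// LSP lifecycle methods.
func isLifecycleMethod(method string) bool {
    switch method {
    case MethodInitialize, MethodInitialized, MethodShutdown, MethodExit:
        return true
    default:
        return false
    }
}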
+// server implements a Language Server Protocol server.
+type server struct {
+    jsonrpc2.Conn
+
+    logger *zap.Logger
+}
+
+var _ Server = (*server)(nil)
+
+// Initialize sends the request as the first request from the client to the server.
+//
+// If the server receives a request or notification before the initialize request it should act as follows:
+//
+// - For a request the response should be an error with code: -32002. The message can be picked by the server.
+// - Notifications should be dropped, except for the exit notification. This will allow the exit of a server without an initialize request.
+//
+// Until the server has responded to the initialize request with an InitializeResult, the client
+// must not send any additional requests or notifications to the server.
+// In addition the server is not allowed to send any requests or notifications to the client until
+// it has responded with an InitializeResult, with the exception that during the initialize request
+// the server is allowed to send the notifications window/showMessage, window/logMessage and telemetry/event
+// as well as the window/showMessageRequest request to the client.
+func (s *server) Initialize(ctx context.Context, params *InitializeParams) (_ *InitializeResult, err error) {
+    s.logger.Debug("call " + MethodInitialize)
+    defer s.logger.Debug("end "+MethodInitialize, zap.Error(err))
+
+    var result *InitializeResult
+    if err := Call(ctx, s.Conn, MethodInitialize, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Initialized sends the notification from the client to the server after the client received the result of the
+// initialize request but before the client is sending any other request or notification to the server.
+//
+// The server can use the initialized notification for example to dynamically register capabilities.
+// The initialized notification may only be sent once.
+func (s *server) Initialized(ctx context.Context, params *InitializedParams) (err error) {
+    s.logger.Debug("notify " + MethodInitialized)
+    defer s.logger.Debug("end "+MethodInitialized, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodInitialized, params)
+}
+
+// Shutdown sends the request from the client to the server.
+//
+// It asks the server to shut down, but to not exit (otherwise the response might not be delivered correctly to the client).
+// There is a separate exit notification that asks the server to exit.
+//
+// Clients must not send any notifications other than `exit` or requests to a server to which they have sent a shutdown request.
+// If a server receives requests after a shutdown request those requests should be errored with `InvalidRequest`.
+func (s *server) Shutdown(ctx context.Context) (err error) {
+    s.logger.Debug("call " + MethodShutdown)
+    defer s.logger.Debug("end "+MethodShutdown, zap.Error(err))
+
+    return Call(ctx, s.Conn, MethodShutdown, nil, nil)
+}
+
+// Exit sends a notification to ask the server to exit its process.
+//
+// The server should exit with success code 0 if the shutdown request has been received before; otherwise with error code 1.
+func (s *server) Exit(ctx context.Context) (err error) {
+    s.logger.Debug("notify " + MethodExit)
+    defer s.logger.Debug("end "+MethodExit, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodExit, nil)
+}
+
+// LogTrace sends a notification to log the trace of the server's execution.
+//
+// The amount and content of these notifications depends on the current trace configuration.
+//
+// If trace is "off", the server should not send any logTrace notification. If trace is "message",
+// the server should not add the "verbose" field in the LogTraceParams.
+//
+// @since 3.16.0.
+func (s *server) LogTrace(ctx context.Context, params *LogTraceParams) (err error) {
+    s.logger.Debug("notify " + MethodLogTrace)
+    defer s.logger.Debug("end "+MethodLogTrace, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodLogTrace, params)
+}
+
+// SetTrace sends a notification that should be used by the client to modify the trace setting of the server.
+//
+// @since 3.16.0.
+func (s *server) SetTrace(ctx context.Context, params *SetTraceParams) (err error) {
+    s.logger.Debug("notify " + MethodSetTrace)
+    defer s.logger.Debug("end "+MethodSetTrace, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodSetTrace, params)
+}
+
+// WorkDoneProgressCancel sends the notification from the client to the server to cancel a progress initiated on the
+// server side using the "window/workDoneProgress/create".
+func (s *server) WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) (err error) {
+    s.logger.Debug("call " + MethodWorkDoneProgressCancel)
+    defer s.logger.Debug("end "+MethodWorkDoneProgressCancel, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodWorkDoneProgressCancel, params)
+}
+
+// CodeAction sends the request from the client to the server to compute commands for a given text document and range.
+//
+// These commands are typically code fixes to either fix problems or to beautify/refactor code. The result of a `textDocument/codeAction`
+// request is an array of `Command` literals which are typically presented in the user interface.
+//
+// To ensure that a server is useful in many clients the commands specified in code actions should be handled by the
+// server and not by the client (see `workspace/executeCommand` and `ServerCapabilities.executeCommandProvider`).
+// If the client supports providing edits with a code action then that mode should be used.
+func (s *server) CodeAction(ctx context.Context, params *CodeActionParams) (result []CodeAction, err error) {
+    s.logger.Debug("call " + MethodTextDocumentCodeAction)
+    defer s.logger.Debug("end "+MethodTextDocumentCodeAction, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentCodeAction, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// CodeLens sends the request from the client to the server to compute code lenses for a given text document.
+func (s *server) CodeLens(ctx context.Context, params *CodeLensParams) (result []CodeLens, err error) {
+    s.logger.Debug("call " + MethodTextDocumentCodeLens)
+    defer s.logger.Debug("end "+MethodTextDocumentCodeLens, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentCodeLens, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// CodeLensResolve sends the request from the client to the server to resolve the command for a given code lens item.
+func (s *server) CodeLensResolve(ctx context.Context, params *CodeLens) (_ *CodeLens, err error) {
+    s.logger.Debug("call " + MethodCodeLensResolve)
+    defer s.logger.Debug("end "+MethodCodeLensResolve, zap.Error(err))
+
+    var result *CodeLens
+    if err := Call(ctx, s.Conn, MethodCodeLensResolve, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
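The lifecycle contract spelled out in the Initialize, Shutdown, and Exit comments above implies a fixed call order on the client side. A hedged sketch of that order (the wrapper function is an illustration, not part of the package):

// runSession shows the required ordering: initialize, then initialized,
// then normal traffic, then shutdown, and only then exit.
func runSession(ctx context.Context, srv Server) error {
    if _, err := srv.Initialize(ctx, &InitializeParams{}); err != nil {
        return err // nothing else may be sent before a successful initialize
    }
    if err := srv.Initialized(ctx, &InitializedParams{}); err != nil {
        return err
    }
    // ... regular requests and notifications go here ...
    if err := srv.Shutdown(ctx); err != nil {
        return err // shutdown must precede exit for a clean exit code 0
    }
    return srv.Exit(ctx)
}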
+// ColorPresentation sends the request from the client to the server to obtain a list of presentations for a color value at a given location.
+//
+// Clients can use the result to
+//
+// - modify a color reference.
+// - show in a color picker and let users pick one of the presentations.
+func (s *server) ColorPresentation(ctx context.Context, params *ColorPresentationParams) (result []ColorPresentation, err error) {
+    s.logger.Debug("call " + MethodTextDocumentColorPresentation)
+    defer s.logger.Debug("end "+MethodTextDocumentColorPresentation, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentColorPresentation, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Completion sends the request from the client to the server to compute completion items at a given cursor position.
+//
+// Completion items are presented in the IntelliSense user interface.
+// If computing full completion items is expensive, servers can additionally provide a handler for the completion item resolve request ('completionItem/resolve').
+//
+// This request is sent when a completion item is selected in the user interface.
+// A typical use case is for example: the 'textDocument/completion' request doesn't fill in the documentation property
+// for returned completion items since it is expensive to compute. When the item is selected in the user interface then
+// a 'completionItem/resolve' request is sent with the selected completion item as a parameter.
+//
+// The returned completion item should have the documentation property filled in. The request can delay the computation of
+// the `detail` and `documentation` properties. However, properties that are needed for the initial sorting and filtering,
+// like `sortText`, `filterText`, `insertText`, and `textEdit` must be provided in the `textDocument/completion` response and must not be changed during resolve.
+func (s *server) Completion(ctx context.Context, params *CompletionParams) (_ *CompletionList, err error) {
+    s.logger.Debug("call " + MethodTextDocumentCompletion)
+    defer s.logger.Debug("end "+MethodTextDocumentCompletion, zap.Error(err))
+
+    var result *CompletionList
+    if err := Call(ctx, s.Conn, MethodTextDocumentCompletion, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// CompletionResolve sends the request from the client to the server to resolve additional information for a given completion item.
+func (s *server) CompletionResolve(ctx context.Context, params *CompletionItem) (_ *CompletionItem, err error) {
+    s.logger.Debug("call " + MethodCompletionItemResolve)
+    defer s.logger.Debug("end "+MethodCompletionItemResolve, zap.Error(err))
+
+    var result *CompletionItem
+    if err := Call(ctx, s.Conn, MethodCompletionItemResolve, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
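The Completion comment above describes a two-step flow: cheap items first, then an explicit resolve for the selected item. A hedged client-side sketch of that flow (the helper name and selection policy are illustrative; `CompletionList.Items` follows the LSP shape):

// completeAndResolve requests completions, then resolves the item the UI
// would have selected so its documentation/detail get filled in lazily.
func completeAndResolve(ctx context.Context, srv Server, params *CompletionParams) (*CompletionItem, error) {
    list, err := srv.Completion(ctx, params)
    if err != nil || list == nil || len(list.Items) == 0 {
        return nil, err
    }
    // Assume the first item was selected in the UI.
    return srv.CompletionResolve(ctx, &list.Items[0])
}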
+// Declaration sends the request from the client to the server to resolve the declaration location of a symbol at a given text document position.
+//
+// The result type LocationLink[] got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.textDocument.declaration.linkSupport`.
+//
+// @since 3.14.0.
+func (s *server) Declaration(ctx context.Context, params *DeclarationParams) (result []Location, err error) {
+    s.logger.Debug("call " + MethodTextDocumentDeclaration)
+    defer s.logger.Debug("end "+MethodTextDocumentDeclaration, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentDeclaration, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Definition sends the request from the client to the server to resolve the definition location of a symbol at a given text document position.
+//
+// The result type `[]LocationLink` got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.textDocument.definition.linkSupport`.
+//
+// @since 3.14.0.
+func (s *server) Definition(ctx context.Context, params *DefinitionParams) (result []Location, err error) {
+    s.logger.Debug("call " + MethodTextDocumentDefinition)
+    defer s.logger.Debug("end "+MethodTextDocumentDefinition, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentDefinition, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DidChange sends the notification from the client to the server to signal changes to a text document.
+//
+// In 2.0 the shape of the params has changed to include proper version numbers and language ids.
+func (s *server) DidChange(ctx context.Context, params *DidChangeTextDocumentParams) (err error) {
+    s.logger.Debug("notify " + MethodTextDocumentDidChange)
+    defer s.logger.Debug("end "+MethodTextDocumentDidChange, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodTextDocumentDidChange, params)
+}
+
+// DidChangeConfiguration sends the notification from the client to the server to signal the change of configuration settings.
+func (s *server) DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) (err error) {
+    s.logger.Debug("call " + MethodWorkspaceDidChangeConfiguration)
+    defer s.logger.Debug("end "+MethodWorkspaceDidChangeConfiguration, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodWorkspaceDidChangeConfiguration, params)
+}
+
+// DidChangeWatchedFiles sends the notification from the client to the server when the client detects changes to files watched by the language client.
+//
+// It is recommended that servers register for these file events using the registration mechanism.
+// In former implementations clients pushed file events without the server actively asking for it.
+func (s *server) DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) (err error) {
+    s.logger.Debug("call " + MethodWorkspaceDidChangeWatchedFiles)
+    defer s.logger.Debug("end "+MethodWorkspaceDidChangeWatchedFiles, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodWorkspaceDidChangeWatchedFiles, params)
+}
+
+// DidChangeWorkspaceFolders sends the notification from the client to the server to inform the server about workspace folder configuration changes.
+//
+// The notification is sent by default if both ServerCapabilities/workspace/workspaceFolders and ClientCapabilities/workspace/workspaceFolders are true;
+// or if the server has registered itself to receive this notification.
+// To register for the workspace/didChangeWorkspaceFolders send a client/registerCapability request from the server to the client.
+//
+// The registration parameter must have a registrations item of the following form, where id is a unique id used to unregister the capability (the example uses a UUID).
+func (s *server) DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) (err error) {
+    s.logger.Debug("call " + MethodWorkspaceDidChangeWorkspaceFolders)
+    defer s.logger.Debug("end "+MethodWorkspaceDidChangeWorkspaceFolders, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodWorkspaceDidChangeWorkspaceFolders, params)
+}
+
+// DidClose sends the notification from the client to the server when the document got closed in the client.
+//
+// The document's truth now exists where the document's Uri points to (e.g. if the document's Uri is a file Uri the truth now exists on disk).
+// As with the open notification the close notification is about managing the document's content.
+// Receiving a close notification doesn't mean that the document was open in an editor before.
+//
+// A close notification requires a previous open notification to be sent.
+// Note that a server's ability to fulfill requests is independent of whether a text document is open or closed.
+func (s *server) DidClose(ctx context.Context, params *DidCloseTextDocumentParams) (err error) {
+    s.logger.Debug("call " + MethodTextDocumentDidClose)
+    defer s.logger.Debug("end "+MethodTextDocumentDidClose, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodTextDocumentDidClose, params)
+}
+
+// DidOpen sends the open notification from the client to the server to signal newly opened text documents.
+//
+// The document's truth is now managed by the client and the server must not try to read the document's truth using the document's Uri.
+// Open in this sense means it is managed by the client. It doesn't necessarily mean that its content is presented in an editor.
+//
+// An open notification must not be sent more than once without a corresponding close notification sent before.
+// This means open and close notification must be balanced and the max open count for a particular textDocument is one.
+// Note that a server's ability to fulfill requests is independent of whether a text document is open or closed.
+func (s *server) DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) (err error) {
+    s.logger.Debug("call " + MethodTextDocumentDidOpen)
+    defer s.logger.Debug("end "+MethodTextDocumentDidOpen, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodTextDocumentDidOpen, params)
+}
+
+// DidSave sends the notification from the client to the server when the document was saved in the client.
+func (s *server) DidSave(ctx context.Context, params *DidSaveTextDocumentParams) (err error) {
+    s.logger.Debug("call " + MethodTextDocumentDidSave)
+    defer s.logger.Debug("end "+MethodTextDocumentDidSave, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodTextDocumentDidSave, params)
+}
+
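The DidOpen/DidClose comments above require balanced open and close notifications per document. A hedged sketch of a client helper that keeps them balanced (the helper is invented; the params field layout is assumed from the LSP 3.16 types):

// withOpenDocument opens a document, runs edits against it, and always
// closes it again, so the open count for the URI never exceeds one.
func withOpenDocument(ctx context.Context, srv Server, doc TextDocumentItem, edit func() error) error {
    if err := srv.DidOpen(ctx, &DidOpenTextDocumentParams{TextDocument: doc}); err != nil {
        return err
    }
    defer func() {
        _ = srv.DidClose(ctx, &DidCloseTextDocumentParams{
            TextDocument: TextDocumentIdentifier{URI: doc.URI},
        })
    }()
    return edit() // DidChange/DidSave notifications for doc.URI go here
}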
+// DocumentColor sends the request from the client to the server to list all color references found in a given text document.
+//
+// Along with the range, a color value in RGB is returned.
+//
+// Clients can use the result to decorate color references in an editor.
+// For example:
+//
+// - Color boxes showing the actual color next to the reference
+// - Show a color picker when a color reference is edited.
+func (s *server) DocumentColor(ctx context.Context, params *DocumentColorParams) (result []ColorInformation, err error) {
+    s.logger.Debug("call " + MethodTextDocumentDocumentColor)
+    defer s.logger.Debug("end "+MethodTextDocumentDocumentColor, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentDocumentColor, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DocumentHighlight sends the request from the client to the server to resolve document highlights for a given text document position.
+//
+// For programming languages this usually highlights all references to the symbol scoped to this file.
+// However we kept 'textDocument/documentHighlight' and 'textDocument/references' separate requests since the first one is allowed to be more fuzzy.
+//
+// Symbol matches usually have a `DocumentHighlightKind` of `Read` or `Write` whereas fuzzy or textual matches use `Text` as the kind.
+func (s *server) DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) (result []DocumentHighlight, err error) {
+    s.logger.Debug("call " + MethodTextDocumentDocumentHighlight)
+    defer s.logger.Debug("end "+MethodTextDocumentDocumentHighlight, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentDocumentHighlight, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DocumentLink sends the request from the client to the server to request the location of links in a document.
+func (s *server) DocumentLink(ctx context.Context, params *DocumentLinkParams) (result []DocumentLink, err error) {
+    s.logger.Debug("call " + MethodTextDocumentDocumentLink)
+    defer s.logger.Debug("end "+MethodTextDocumentDocumentLink, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentDocumentLink, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DocumentLinkResolve sends the request from the client to the server to resolve the target of a given document link.
+func (s *server) DocumentLinkResolve(ctx context.Context, params *DocumentLink) (_ *DocumentLink, err error) {
+    s.logger.Debug("call " + MethodDocumentLinkResolve)
+    defer s.logger.Debug("end "+MethodDocumentLinkResolve, zap.Error(err))
+
+    var result *DocumentLink
+    if err := Call(ctx, s.Conn, MethodDocumentLinkResolve, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DocumentSymbol sends the request from the client to the server to return a flat list of all symbols found in a given text document.
+//
+// Neither the symbol's location range nor the symbol's container name should be used to infer a hierarchy.
+func (s *server) DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) (result []interface{}, err error) {
+    s.logger.Debug("call " + MethodTextDocumentDocumentSymbol)
+    defer s.logger.Debug("end "+MethodTextDocumentDocumentSymbol, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentDocumentSymbol, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// ExecuteCommand sends the request from the client to the server to trigger command execution on the server.
+//
+// In most cases the server creates a `WorkspaceEdit` structure and applies the changes to the workspace using the
+// request `workspace/applyEdit` which is sent from the server to the client.
+func (s *server) ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (result interface{}, err error) {
+    s.logger.Debug("call " + MethodWorkspaceExecuteCommand)
+    defer s.logger.Debug("end "+MethodWorkspaceExecuteCommand, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodWorkspaceExecuteCommand, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// FoldingRanges sends the request from the client to the server to return all folding ranges found in a given text document.
+//
+// @since version 3.10.0.
+func (s *server) FoldingRanges(ctx context.Context, params *FoldingRangeParams) (result []FoldingRange, err error) {
+    s.logger.Debug("call " + MethodTextDocumentFoldingRange)
+    defer s.logger.Debug("end "+MethodTextDocumentFoldingRange, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentFoldingRange, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Formatting sends the request from the client to the server to format a whole document.
+func (s *server) Formatting(ctx context.Context, params *DocumentFormattingParams) (result []TextEdit, err error) {
+    s.logger.Debug("call " + MethodTextDocumentFormatting)
+    defer s.logger.Debug("end "+MethodTextDocumentFormatting, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentFormatting, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Hover sends the request from the client to the server to request hover information at a given text document position.
+func (s *server) Hover(ctx context.Context, params *HoverParams) (_ *Hover, err error) {
+    s.logger.Debug("call " + MethodTextDocumentHover)
+    defer s.logger.Debug("end "+MethodTextDocumentHover, zap.Error(err))
+
+    var result *Hover
+    if err := Call(ctx, s.Conn, MethodTextDocumentHover, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Implementation sends the request from the client to the server to resolve the implementation location of a symbol at a given text document position.
+//
+// The result type `[]LocationLink` got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.implementation.typeDefinition.linkSupport`.
+func (s *server) Implementation(ctx context.Context, params *ImplementationParams) (result []Location, err error) {
+    s.logger.Debug("call " + MethodTextDocumentImplementation)
+    defer s.logger.Debug("end "+MethodTextDocumentImplementation, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentImplementation, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// OnTypeFormatting sends the request from the client to the server to format parts of the document during typing.
+func (s *server) OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) (result []TextEdit, err error) {
+    s.logger.Debug("call " + MethodTextDocumentOnTypeFormatting)
+    defer s.logger.Debug("end "+MethodTextDocumentOnTypeFormatting, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentOnTypeFormatting, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// PrepareRename sends the request from the client to the server to setup and test the validity of a rename operation at a given location.
+//
+// @since version 3.12.0.
+func (s *server) PrepareRename(ctx context.Context, params *PrepareRenameParams) (result *Range, err error) {
+    s.logger.Debug("call " + MethodTextDocumentPrepareRename)
+    defer s.logger.Debug("end "+MethodTextDocumentPrepareRename, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentPrepareRename, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// RangeFormatting sends the request from the client to the server to format a given range in a document.
+func (s *server) RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) (result []TextEdit, err error) {
+    s.logger.Debug("call " + MethodTextDocumentRangeFormatting)
+    defer s.logger.Debug("end "+MethodTextDocumentRangeFormatting, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentRangeFormatting, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// References sends the request from the client to the server to resolve project-wide references for the symbol denoted by the given text document position.
+func (s *server) References(ctx context.Context, params *ReferenceParams) (result []Location, err error) {
+    s.logger.Debug("call " + MethodTextDocumentReferences)
+    defer s.logger.Debug("end "+MethodTextDocumentReferences, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentReferences, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Rename sends the request from the client to the server to perform a workspace-wide rename of a symbol.
+func (s *server) Rename(ctx context.Context, params *RenameParams) (result *WorkspaceEdit, err error) {
+    s.logger.Debug("call " + MethodTextDocumentRename)
+    defer s.logger.Debug("end "+MethodTextDocumentRename, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentRename, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// SignatureHelp sends the request from the client to the server to request signature information at a given cursor position.
+func (s *server) SignatureHelp(ctx context.Context, params *SignatureHelpParams) (_ *SignatureHelp, err error) {
+    s.logger.Debug("call " + MethodTextDocumentSignatureHelp)
+    defer s.logger.Debug("end "+MethodTextDocumentSignatureHelp, zap.Error(err))
+
+    var result *SignatureHelp
+    if err := Call(ctx, s.Conn, MethodTextDocumentSignatureHelp, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// Symbols sends the request from the client to the server to list project-wide symbols matching the query string.
+func (s *server) Symbols(ctx context.Context, params *WorkspaceSymbolParams) (result []SymbolInformation, err error) {
+    s.logger.Debug("call " + MethodWorkspaceSymbol)
+    defer s.logger.Debug("end "+MethodWorkspaceSymbol, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodWorkspaceSymbol, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// TypeDefinition sends the request from the client to the server to resolve the type definition location of a symbol at a given text document position.
+//
+// The result type `[]LocationLink` got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.textDocument.typeDefinition.linkSupport`.
+//
+// @since version 3.6.0.
+func (s *server) TypeDefinition(ctx context.Context, params *TypeDefinitionParams) (result []Location, err error) {
+    s.logger.Debug("call " + MethodTextDocumentTypeDefinition)
+    defer s.logger.Debug("end "+MethodTextDocumentTypeDefinition, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentTypeDefinition, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// WillSave sends the notification from the client to the server before the document is actually saved.
+func (s *server) WillSave(ctx context.Context, params *WillSaveTextDocumentParams) (err error) {
+    s.logger.Debug("call " + MethodTextDocumentWillSave)
+    defer s.logger.Debug("end "+MethodTextDocumentWillSave, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodTextDocumentWillSave, params)
+}
+
+// WillSaveWaitUntil sends the request from the client to the server before the document is actually saved.
+//
+// The request can return an array of TextEdits which will be applied to the text document before it is saved.
+// Please note that clients might drop results if computing the text edits took too long or if a server constantly fails on this request.
+// This is done to keep the save fast and reliable.
+func (s *server) WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) (result []TextEdit, err error) {
+    s.logger.Debug("call " + MethodTextDocumentWillSaveWaitUntil)
+    defer s.logger.Debug("end "+MethodTextDocumentWillSaveWaitUntil, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodTextDocumentWillSaveWaitUntil, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// ShowDocument sends the request from a server to a client to ask the client to display a particular document in the user interface.
+//
+// @since 3.16.0.
+func (s *server) ShowDocument(ctx context.Context, params *ShowDocumentParams) (result *ShowDocumentResult, err error) {
+    s.logger.Debug("call " + MethodShowDocument)
+    defer s.logger.Debug("end "+MethodShowDocument, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodShowDocument, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// WillCreateFiles sends the will create files request from the client to the server before files are actually created, as long as the creation is triggered from within the client.
+//
+// The request can return a WorkspaceEdit which will be applied to workspace before the files are created.
+//
+// Please note that clients might drop results if computing the edit took too long or if a server constantly fails on this request. This is done to keep creates fast and reliable.
+//
+// @since 3.16.0.
+func (s *server) WillCreateFiles(ctx context.Context, params *CreateFilesParams) (result *WorkspaceEdit, err error) {
+    s.logger.Debug("call " + MethodWillCreateFiles)
+    defer s.logger.Debug("end "+MethodWillCreateFiles, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodWillCreateFiles, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DidCreateFiles sends the did create files notification from the client to the server when files were created from within the client.
+//
+// @since 3.16.0.
+func (s *server) DidCreateFiles(ctx context.Context, params *CreateFilesParams) (err error) {
+    s.logger.Debug("call " + MethodDidCreateFiles)
+    defer s.logger.Debug("end "+MethodDidCreateFiles, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodDidCreateFiles, params)
+}
+
+// WillRenameFiles sends the will rename files request from the client to the server before files are actually renamed, as long as the rename is triggered from within the client.
+//
+// The request can return a WorkspaceEdit which will be applied to workspace before the files are renamed.
+//
+// Please note that clients might drop results if computing the edit took too long or if a server constantly fails on this request. This is done to keep renames fast and reliable.
+//
+// @since 3.16.0.
+func (s *server) WillRenameFiles(ctx context.Context, params *RenameFilesParams) (result *WorkspaceEdit, err error) {
+    s.logger.Debug("call " + MethodWillRenameFiles)
+    defer s.logger.Debug("end "+MethodWillRenameFiles, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodWillRenameFiles, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DidRenameFiles sends the did rename files notification from the client to the server when files were renamed from within the client.
+//
+// @since 3.16.0.
+func (s *server) DidRenameFiles(ctx context.Context, params *RenameFilesParams) (err error) {
+    s.logger.Debug("call " + MethodDidRenameFiles)
+    defer s.logger.Debug("end "+MethodDidRenameFiles, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodDidRenameFiles, params)
+}
+
+// WillDeleteFiles sends the will delete files request from the client to the server before files are actually deleted, as long as the deletion is triggered from within the client.
+//
+// The request can return a WorkspaceEdit which will be applied to workspace before the files are deleted.
+//
+// Please note that clients might drop results if computing the edit took too long or if a server constantly fails on this request. This is done to keep deletes fast and reliable.
+//
+// @since 3.16.0.
+func (s *server) WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (result *WorkspaceEdit, err error) {
+    s.logger.Debug("call " + MethodWillDeleteFiles)
+    defer s.logger.Debug("end "+MethodWillDeleteFiles, zap.Error(err))
+
+    if err := Call(ctx, s.Conn, MethodWillDeleteFiles, params, &result); err != nil {
+        return nil, err
+    }
+
+    return result, nil
+}
+
+// DidDeleteFiles sends the did delete files notification from the client to the server when files were deleted from within the client.
+//
+// @since 3.16.0.
+func (s *server) DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) (err error) {
+    s.logger.Debug("call " + MethodDidDeleteFiles)
+    defer s.logger.Debug("end "+MethodDidDeleteFiles, zap.Error(err))
+
+    return s.Conn.Notify(ctx, MethodDidDeleteFiles, params)
+}
+
+// CodeLensRefresh is sent from the server to the client.
+//
+// Servers can use it to ask clients to refresh the code lenses currently shown in editors.
+// As a result the client should ask the server to recompute the code lenses for these editors.
+// This is useful if a server detects a configuration change which requires a re-calculation of all code lenses.
+//
+// Note that the client still has the freedom to delay the re-calculation of the code lenses if for example an editor is currently not visible.
+//
+// @since 3.16.0.
+func (s *server) CodeLensRefresh(ctx context.Context) (err error) {
+	s.logger.Debug("call " + MethodCodeLensRefresh)
+	defer s.logger.Debug("end "+MethodCodeLensRefresh, zap.Error(err))
+
+	return Call(ctx, s.Conn, MethodCodeLensRefresh, nil, nil)
+}
+
+// PrepareCallHierarchy is sent from the client to the server to return a call hierarchy for the language element at the given text document position.
+//
+// The call hierarchy requests are executed in two steps:
+// 1. first a call hierarchy item is resolved for the given text document position
+// 2. for a call hierarchy item the incoming or outgoing call hierarchy items are resolved.
+//
+// @since 3.16.0.
+func (s *server) PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) (result []CallHierarchyItem, err error) {
+	s.logger.Debug("call " + MethodTextDocumentPrepareCallHierarchy)
+	defer s.logger.Debug("end "+MethodTextDocumentPrepareCallHierarchy, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentPrepareCallHierarchy, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// IncomingCalls is the request sent from the client to the server to resolve incoming calls for a given call hierarchy item.
+//
+// The request doesn't define its own client and server capabilities. It is only issued if a server registers for the "textDocument/prepareCallHierarchy" request.
+//
+// @since 3.16.0.
+func (s *server) IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) (result []CallHierarchyIncomingCall, err error) {
+	s.logger.Debug("call " + MethodCallHierarchyIncomingCalls)
+	defer s.logger.Debug("end "+MethodCallHierarchyIncomingCalls, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodCallHierarchyIncomingCalls, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// OutgoingCalls is the request sent from the client to the server to resolve outgoing calls for a given call hierarchy item.
+//
+// The request doesn't define its own client and server capabilities. It is only issued if a server registers for the "textDocument/prepareCallHierarchy" request.
+//
+// @since 3.16.0.
+func (s *server) OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) (result []CallHierarchyOutgoingCall, err error) {
+	s.logger.Debug("call " + MethodCallHierarchyOutgoingCalls)
+	defer s.logger.Debug("end "+MethodCallHierarchyOutgoingCalls, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodCallHierarchyOutgoingCalls, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SemanticTokensFull is the request sent from the client to the server to resolve semantic tokens for a given file.
+//
+// Semantic tokens are used to add additional color information to a file that depends on language specific symbol information.
+//
+// A semantic token request usually produces a large result. The protocol therefore supports encoding tokens with numbers.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (result *SemanticTokens, err error) {
+	s.logger.Debug("call " + MethodSemanticTokensFull)
+	defer s.logger.Debug("end "+MethodSemanticTokensFull, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodSemanticTokensFull, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
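+
+// exampleIncomingCalls is an editorial sketch, not part of the upstream file:
+// it shows the two-step call-hierarchy flow described above, first preparing
+// items for a position and then resolving incoming calls for the first item.
+// It assumes the package's Server interface exposes both methods and that
+// CallHierarchyIncomingCallsParams carries the prepared Item, per the LSP spec.
+func exampleIncomingCalls(ctx context.Context, s Server, prep *CallHierarchyPrepareParams) ([]CallHierarchyIncomingCall, error) {
+	items, err := s.PrepareCallHierarchy(ctx, prep)
+	if err != nil || len(items) == 0 {
+		return nil, err
+	}
+	return s.IncomingCalls(ctx, &CallHierarchyIncomingCallsParams{Item: items[0]})
+}
+
+// SemanticTokensFullDelta is the request sent from the client to the server to resolve a semantic token delta for a given file.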
+//
+// Semantic tokens are used to add additional color information to a file that depends on language specific symbol information.
+//
+// A semantic token request usually produces a large result. The protocol therefore supports encoding tokens with numbers.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (result interface{}, err error) {
+	s.logger.Debug("call " + MethodSemanticTokensFullDelta)
+	defer s.logger.Debug("end "+MethodSemanticTokensFullDelta, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodSemanticTokensFullDelta, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SemanticTokensRange is the request sent from the client to the server to resolve semantic tokens for a given range of a file.
+//
+// When a user opens a file it can be beneficial to only compute the semantic tokens for the visible range (faster rendering of the tokens in the user interface).
+// If a server can compute these tokens faster than for the whole file it can provide a handler for the "textDocument/semanticTokens/range" request to handle this case specially.
+//
+// Please note that if a client also announces that it will send the "textDocument/semanticTokens/range" request, the server should implement this request as well to allow for flicker-free scrolling and semantic coloring of a minimap.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (result *SemanticTokens, err error) {
+	s.logger.Debug("call " + MethodSemanticTokensRange)
+	defer s.logger.Debug("end "+MethodSemanticTokensRange, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodSemanticTokensRange, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SemanticTokensRefresh is sent from the server to the client. Servers can use it to ask clients to refresh the editors for which this server provides semantic tokens.
+//
+// As a result the client should ask the server to recompute the semantic tokens for these editors.
+// This is useful if a server detects a project wide configuration change which requires a re-calculation of all semantic tokens.
+//
+// Note that the client still has the freedom to delay the re-calculation of the semantic tokens if for example an editor is currently not visible.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensRefresh(ctx context.Context) (err error) {
+	s.logger.Debug("call " + MethodSemanticTokensRefresh)
+	defer s.logger.Debug("end "+MethodSemanticTokensRefresh, zap.Error(err))
+
+	return Call(ctx, s.Conn, MethodSemanticTokensRefresh, nil, nil)
+}
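+
+// exampleVisibleTokens is an editorial sketch, not part of the upstream file:
+// it requests semantic tokens only for a visible range of a document, which a
+// client might prefer over a full-file request while the user scrolls. It
+// assumes the package's Server interface exposes SemanticTokensRange, as the
+// concrete *server above does.
+func exampleVisibleTokens(ctx context.Context, s Server, params *SemanticTokensRangeParams) *SemanticTokens {
+	tokens, err := s.SemanticTokensRange(ctx, params)
+	if err != nil {
+		return nil // this hypothetical caller treats an error as "no tokens"
+	}
+	return tokens
+}
+
+// LinkedEditingRange is the linked editing request sent from the client to the server to return, for a given position in a document, the range of the symbol at that position and all ranges that have the same content.
+//
+// Optionally a word pattern can be returned to describe valid contents.
+//
+// A rename to one of the ranges can be applied to all other ranges if the new content is valid. If no result-specific word pattern is provided, the word pattern from the client's language configuration is used.
+//
+// @since 3.16.0.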
+func (s *server) LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (result *LinkedEditingRanges, err error) {
+	s.logger.Debug("call " + MethodLinkedEditingRange)
+	defer s.logger.Debug("end "+MethodLinkedEditingRange, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodLinkedEditingRange, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Moniker is the request sent from the client to the server to get the symbol monikers for a given text document position.
+//
+// An array of Moniker types is returned as response to indicate possible monikers at the given location.
+//
+// If no monikers can be calculated, an empty array or null should be returned.
+//
+// @since 3.16.0.
+func (s *server) Moniker(ctx context.Context, params *MonikerParams) (result []Moniker, err error) {
+	s.logger.Debug("call " + MethodMoniker)
+	defer s.logger.Debug("end "+MethodMoniker, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodMoniker, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Request sends a request from the client to the server that is not compliant with the Language Server Protocol specifications.
+func (s *server) Request(ctx context.Context, method string, params interface{}) (interface{}, error) {
+	s.logger.Debug("call " + method)
+	defer s.logger.Debug("end " + method)
+
+	var result interface{}
+	if err := Call(ctx, s.Conn, method, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
diff --git a/vendor/go.lsp.dev/protocol/text.go b/vendor/go.lsp.dev/protocol/text.go
new file mode 100644
index 00000000000..34aec1bb5bc
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/text.go
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"strconv"
+)
+
+// DidOpenTextDocumentParams params of DidOpenTextDocument notification.
+type DidOpenTextDocumentParams struct {
+	// TextDocument is the document that was opened.
+	TextDocument TextDocumentItem `json:"textDocument"`
+}
+
+// DidChangeTextDocumentParams params of DidChangeTextDocument notification.
+type DidChangeTextDocumentParams struct {
+	// TextDocument is the document that did change. The version number points
+	// to the version after all provided content changes have
+	// been applied.
+	TextDocument VersionedTextDocumentIdentifier `json:"textDocument"`
+
+	// ContentChanges is the actual content changes. The content changes describe single state changes
+	// to the document. So if there are two content changes c1 and c2 for a document
+	// in state S then c1 moves the document to S' and c2 to S''.
+	ContentChanges []TextDocumentContentChangeEvent `json:"contentChanges"` // []TextDocumentContentChangeEvent | text
+}
+
+// TextDocumentSaveReason represents reasons why a text document is saved.
+type TextDocumentSaveReason float64
+
+const (
+	// TextDocumentSaveReasonManual means the save was manually triggered, e.g. by the user pressing save,
+	// by starting debugging, or by an API call.
+	TextDocumentSaveReasonManual TextDocumentSaveReason = 1
+
+	// TextDocumentSaveReasonAfterDelay means the save was automatic, after a delay.
+	TextDocumentSaveReasonAfterDelay TextDocumentSaveReason = 2
+
+	// TextDocumentSaveReasonFocusOut means the save happened when the editor lost focus.
+	TextDocumentSaveReasonFocusOut TextDocumentSaveReason = 3
+)
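+
+// exampleDidChange is an editorial sketch, not part of the upstream file: it
+// builds DidChangeTextDocumentParams describing a single incremental edit.
+// Range and Position are defined elsewhere in this package; the positions and
+// replacement text below are made up for illustration.
+func exampleDidChange(doc VersionedTextDocumentIdentifier) DidChangeTextDocumentParams {
+	return DidChangeTextDocumentParams{
+		TextDocument: doc,
+		ContentChanges: []TextDocumentContentChangeEvent{{
+			// Replace the first five characters of the first line.
+			Range: Range{
+				Start: Position{Line: 0, Character: 0},
+				End:   Position{Line: 0, Character: 5},
+			},
+			Text: "hello",
+		}},
+	}
+}
+
+// String implements fmt.Stringer.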
+func (t TextDocumentSaveReason) String() string {
+	switch t {
+	case TextDocumentSaveReasonManual:
+		return "Manual"
+	case TextDocumentSaveReasonAfterDelay:
+		return "AfterDelay"
+	case TextDocumentSaveReasonFocusOut:
+		return "FocusOut"
+	default:
+		return strconv.FormatFloat(float64(t), 'f', -10, 64)
+	}
+}
+
+// TextDocumentChangeRegistrationOptions describe options to be used when registering for text document change events.
+type TextDocumentChangeRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// SyncKind how documents are synced to the server. See TextDocumentSyncKind.Full
+	// and TextDocumentSyncKind.Incremental.
+	SyncKind TextDocumentSyncKind `json:"syncKind"`
+}
+
+// WillSaveTextDocumentParams is the parameters sent in a will-save text document notification.
+type WillSaveTextDocumentParams struct {
+	// TextDocument is the document that will be saved.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Reason is the 'TextDocumentSaveReason'.
+	Reason TextDocumentSaveReason `json:"reason,omitempty"`
+}
+
+// DidSaveTextDocumentParams params of DidSaveTextDocument notification.
+type DidSaveTextDocumentParams struct {
+	// Text is the optional content when saved. Depends on the includeText value
+	// when the save notification was requested.
+	Text string `json:"text,omitempty"`
+
+	// TextDocument is the document that was saved.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// TextDocumentContentChangeEvent an event describing a change to a text document. If range and rangeLength are omitted
+// the new text is considered to be the full content of the document.
+type TextDocumentContentChangeEvent struct {
+	// Range is the range of the document that changed.
+	Range Range `json:"range"`
+
+	// RangeLength is the length of the range that got replaced.
+	RangeLength uint32 `json:"rangeLength,omitempty"`
+
+	// Text is the new text of the document.
+	Text string `json:"text"`
+}
+
+// TextDocumentSaveRegistrationOptions TextDocumentSave registration options.
+type TextDocumentSaveRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// IncludeText indicates whether the client is supposed to include the content on save.
+	IncludeText bool `json:"includeText,omitempty"`
+}
+
+// DidCloseTextDocumentParams params of DidCloseTextDocument notification.
+type DidCloseTextDocumentParams struct {
+	// TextDocument the document that was closed.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
diff --git a/vendor/go.lsp.dev/protocol/util.go b/vendor/go.lsp.dev/protocol/util.go
new file mode 100644
index 00000000000..4dc29c438ab
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/util.go
@@ -0,0 +1,9 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// NewVersion returns a pointer to i, for use in optional version fields.
+func NewVersion(i int32) *int32 {
+	return &i
+}
diff --git a/vendor/go.lsp.dev/protocol/version.go b/vendor/go.lsp.dev/protocol/version.go
new file mode 100644
index 00000000000..79a27f348fe
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/version.go
@@ -0,0 +1,7 @@
+// SPDX-FileCopyrightText: 2018 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// Version is the version of the language-server-protocol specification being implemented.
+const Version = "3.15.3"
diff --git a/vendor/go.lsp.dev/protocol/window.go b/vendor/go.lsp.dev/protocol/window.go
new file mode 100644
index 00000000000..b6af6f43818
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/window.go
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import "strconv"
+
+// ShowMessageParams params of ShowMessage notification.
+type ShowMessageParams struct {
+	// Message is the actual message.
+	Message string `json:"message"`
+
+	// Type is the message type.
+	Type MessageType `json:"type"`
+}
+
+// MessageType is the type of the ShowMessageParams Type field.
+type MessageType float64
+
+const (
+	// MessageTypeError an error message.
+	MessageTypeError MessageType = 1
+	// MessageTypeWarning a warning message.
+	MessageTypeWarning MessageType = 2
+	// MessageTypeInfo an information message.
+	MessageTypeInfo MessageType = 3
+	// MessageTypeLog a log message.
+	MessageTypeLog MessageType = 4
+)
+
+// String implements fmt.Stringer.
+func (m MessageType) String() string {
+	switch m {
+	case MessageTypeError:
+		return "error"
+	case MessageTypeWarning:
+		return "warning"
+	case MessageTypeInfo:
+		return "info"
+	case MessageTypeLog:
+		return "log"
+	default:
+		return strconv.FormatFloat(float64(m), 'f', -10, 64)
+	}
+}
+
+// Enabled reports whether the level is enabled.
+func (m MessageType) Enabled(level MessageType) bool {
+	return level > 0 && m >= level
+}
+
+// messageTypeMap map of MessageTypes.
+var messageTypeMap = map[string]MessageType{
+	"error":   MessageTypeError,
+	"warning": MessageTypeWarning,
+	"info":    MessageTypeInfo,
+	"log":     MessageTypeLog,
+}
+
+// ToMessageType converts level to the MessageType.
+func ToMessageType(level string) MessageType {
+	mt, ok := messageTypeMap[level]
+	if !ok {
+		return MessageType(0) // unknown
+	}
+
+	return mt
+}
+
+// ShowMessageRequestParams params of ShowMessageRequest request.
+type ShowMessageRequestParams struct {
+	// Actions is the message action items to present.
+	Actions []MessageActionItem `json:"actions"`
+
+	// Message is the actual message.
+	Message string `json:"message"`
+
+	// Type is the message type. See {@link MessageType}.
+	Type MessageType `json:"type"`
+}
+
+// MessageActionItem item of ShowMessageRequestParams action.
+type MessageActionItem struct {
+	// Title a short title like 'Retry', 'Open Log' etc.
+	Title string `json:"title"`
+}
+
+// LogMessageParams params of LogMessage notification.
+type LogMessageParams struct {
+	// Message is the actual message.
+	Message string `json:"message"`
+
+	// Type is the message type. See {@link MessageType}.
+	Type MessageType `json:"type"`
+}
+
+// WorkDoneProgressCreateParams params of WorkDoneProgressCreate request.
+//
+// @since 3.15.0.
+type WorkDoneProgressCreateParams struct {
+	// Token is the token to be used to report progress.
+	Token ProgressToken `json:"token"`
+}
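+
+// exampleMessageThreshold is an editorial sketch, not part of the upstream
+// file: it converts a configured level name with ToMessageType and uses
+// MessageType.Enabled to decide whether a window/showMessage notification
+// passes that threshold. Note that Enabled compares the raw numeric values,
+// where MessageTypeError (1) is the most severe.
+func exampleMessageThreshold(params *ShowMessageParams, level string) bool {
+	minLevel := ToMessageType(level) // e.g. "warning" -> MessageTypeWarning; unknown -> 0
+	return params.Type.Enabled(minLevel)
+}
+
+// WorkDoneProgressCancelParams params of WorkDoneProgressCancel request.
+//
+// @since 3.15.0.
+type WorkDoneProgressCancelParams struct {
+	// Token is the token to be used to report progress.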
+	Token ProgressToken `json:"token"`
+}
diff --git a/vendor/go.lsp.dev/protocol/workspace.go b/vendor/go.lsp.dev/protocol/workspace.go
new file mode 100644
index 00000000000..3d39cd76626
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/workspace.go
@@ -0,0 +1,213 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"strconv"
+
+	"go.lsp.dev/uri"
+)
+
+// WorkspaceFolder response of Workspace folders request.
+type WorkspaceFolder struct {
+	// URI is the associated URI for this workspace folder.
+	URI string `json:"uri"`
+
+	// Name is the name of the workspace folder. Used to refer to this
+	// workspace folder in the user interface.
+	Name string `json:"name"`
+}
+
+// DidChangeWorkspaceFoldersParams params of DidChangeWorkspaceFolders notification.
+type DidChangeWorkspaceFoldersParams struct {
+	// Event is the actual workspace folder change event.
+	Event WorkspaceFoldersChangeEvent `json:"event"`
+}
+
+// WorkspaceFoldersChangeEvent is the workspace folder change event.
+type WorkspaceFoldersChangeEvent struct {
+	// Added is the array of added workspace folders.
+	Added []WorkspaceFolder `json:"added"`
+
+	// Removed is the array of the removed workspace folders.
+	Removed []WorkspaceFolder `json:"removed"`
+}
+
+// DidChangeConfigurationParams params of DidChangeConfiguration notification.
+type DidChangeConfigurationParams struct {
+	// Settings is the actual changed settings.
+	Settings interface{} `json:"settings,omitempty"`
+}
+
+// ConfigurationParams params of Configuration request.
+type ConfigurationParams struct {
+	Items []ConfigurationItem `json:"items"`
+}
+
+// ConfigurationItem consists of the configuration section to ask for and an additional scope URI.
+//
+// The configuration section asked for is defined by the server and doesn't necessarily need to correspond to the configuration store used by the client.
+// So a server might ask for a configuration "cpp.formatterOptions" while the client stores the configuration in an XML store with a different layout.
+// It is up to the client to do the necessary conversion.
+//
+// If a scope URI is provided the client should return the setting scoped to the provided resource.
+// If the client for example uses EditorConfig to manage its settings, the configuration should be returned for the passed resource URI.
+// If the client can't provide a configuration setting for a given scope, then null needs to be present in the returned array.
+type ConfigurationItem struct {
+	// ScopeURI is the scope to get the configuration section for.
+	ScopeURI uri.URI `json:"scopeUri,omitempty"`
+
+	// Section is the configuration section asked for.
+	Section string `json:"section,omitempty"`
+}
+
+// DidChangeWatchedFilesParams params of DidChangeWatchedFiles notification.
+type DidChangeWatchedFilesParams struct {
+	// Changes is the actual file events.
+	Changes []*FileEvent `json:"changes,omitempty"`
+}
+
+// FileEvent an event describing a file change.
+type FileEvent struct {
+	// Type is the change type.
+	Type FileChangeType `json:"type"`
+
+	// URI is the file's URI.
+	URI uri.URI `json:"uri"`
+}
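+
+// exampleDeletedFiles is an editorial sketch, not part of the upstream file:
+// it collects the URIs of deleted files from a workspace/didChangeWatchedFiles
+// notification, using the FileChangeType constants declared just below.
+func exampleDeletedFiles(params *DidChangeWatchedFilesParams) []uri.URI {
+	var deleted []uri.URI
+	for _, ev := range params.Changes {
+		if ev.Type == FileChangeTypeDeleted {
+			deleted = append(deleted, ev.URI)
+		}
+	}
+	return deleted
+}
+
+// FileChangeType is the file event type.
+type FileChangeType float64
+
+const (
+	// FileChangeTypeCreated means the file got created.
+	FileChangeTypeCreated FileChangeType = 1
+	// FileChangeTypeChanged means the file got changed.
+	FileChangeTypeChanged FileChangeType = 2
+	// FileChangeTypeDeleted means the file got deleted.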
+	FileChangeTypeDeleted FileChangeType = 3
+)
+
+// String implements fmt.Stringer.
+func (t FileChangeType) String() string {
+	switch t {
+	case FileChangeTypeCreated:
+		return "Created"
+	case FileChangeTypeChanged:
+		return "Changed"
+	case FileChangeTypeDeleted:
+		return "Deleted"
+	default:
+		return strconv.FormatFloat(float64(t), 'f', -10, 64)
+	}
+}
+
+// DidChangeWatchedFilesRegistrationOptions describe options to be used when registering for file system change events.
+type DidChangeWatchedFilesRegistrationOptions struct {
+	// Watchers is the watchers to register.
+	Watchers []FileSystemWatcher `json:"watchers"`
+}
+
+// FileSystemWatcher watchers of DidChangeWatchedFiles Registration options.
+type FileSystemWatcher struct {
+	// GlobPattern is the glob pattern to watch.
+	//
+	// Glob patterns can have the following syntax:
+	// - `*` to match one or more characters in a path segment
+	// - `?` to match on one character in a path segment
+	// - `**` to match any number of path segments, including none
+	// - `{}` to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript and JavaScript files)
+	// - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, ...)
+	// - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
+	GlobPattern string `json:"globPattern"`
+
+	// Kind is the kind of events of interest. If omitted it defaults
+	// to WatchKind.Create | WatchKind.Change | WatchKind.Delete
+	// which is 7.
+	Kind WatchKind `json:"kind,omitempty"`
+}
+
+// WatchKind is the kind of events of interest for a FileSystemWatcher.
+type WatchKind float64
+
+const (
+	// WatchKindCreate interested in create events.
+	WatchKindCreate WatchKind = 1
+
+	// WatchKindChange interested in change events.
+	WatchKindChange WatchKind = 2
+
+	// WatchKindDelete interested in delete events.
+	WatchKindDelete WatchKind = 4
+)
+
+// String implements fmt.Stringer.
+func (k WatchKind) String() string {
+	switch k {
+	case WatchKindCreate:
+		return "Create"
+	case WatchKindChange:
+		return "Change"
+	case WatchKindDelete:
+		return "Delete"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// WorkspaceSymbolParams is the parameters of a Workspace Symbol request.
+type WorkspaceSymbolParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// Query a query string to filter symbols by.
+	//
+	// Clients may send an empty string here to request all symbols.
+	Query string `json:"query"`
+}
+
+// ExecuteCommandParams params of the ExecuteCommand request.
+type ExecuteCommandParams struct {
+	WorkDoneProgressParams
+
+	// Command is the identifier of the actual command handler.
+	Command string `json:"command"`
+
+	// Arguments that the command should be invoked with.
+	Arguments []interface{} `json:"arguments,omitempty"`
+}
+
+// ExecuteCommandRegistrationOptions execute command registration options.
+type ExecuteCommandRegistrationOptions struct {
+	// Commands is the commands to be executed on the server.
+	Commands []string `json:"commands"`
+}
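+
+// exampleExecuteCommand is an editorial sketch, not part of the upstream
+// file: it populates ExecuteCommandParams for a hypothetical "server.restart"
+// command; both the command name and its argument are made up for
+// illustration and must match whatever the server registered.
+func exampleExecuteCommand() *ExecuteCommandParams {
+	return &ExecuteCommandParams{
+		Command:   "server.restart",
+		Arguments: []interface{}{true}, // command-specific arguments
+	}
+}
+
+// ApplyWorkspaceEditParams params of the ApplyWorkspaceEdit request.
+type ApplyWorkspaceEditParams struct {
+	// Label an optional label of the workspace edit. This label is
+	// presented in the user interface, for example on an undo
+	// stack to undo the workspace edit.
+	Label string `json:"label,omitempty"`
+
+	// Edit is the edits to apply.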
+	Edit WorkspaceEdit `json:"edit"`
+}
+
+// ApplyWorkspaceEditResponse response to the ApplyWorkspaceEdit request.
+type ApplyWorkspaceEditResponse struct {
+	// Applied indicates whether the edit was applied or not.
+	Applied bool `json:"applied"`
+
+	// FailureReason an optional textual description for why the edit was not applied.
+	// This may be used by the server for diagnostic logging or to provide
+	// a suitable error for a request that triggered the edit.
+	//
+	// @since 3.16.0.
+	FailureReason string `json:"failureReason,omitempty"`
+
+	// FailedChange depending on the client's failure handling strategy "failedChange"
+	// might contain the index of the change that failed. This property is
+	// only available if the client signals a "failureHandlingStrategy"
+	// in its client capabilities.
+	//
+	// @since 3.16.0.
+	FailedChange uint32 `json:"failedChange,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/uri/.codecov.yml b/vendor/go.lsp.dev/uri/.codecov.yml
new file mode 100644
index 00000000000..cb891cd5e0b
--- /dev/null
+++ b/vendor/go.lsp.dev/uri/.codecov.yml
@@ -0,0 +1,24 @@
+coverage:
+  precision: 1
+  round: down
+  range: "70...100"
+
+  status:
+    project:
+      default: off
+      target: auto
+      threshold: 10%
+      if_not_found: success
+      if_ci_failed: error
+    patch:
+      default: off
+      only_pulls: true
+      target: 50%
+      threshold: 10%
+    changes: false
+  ignore:
+    - "vendor"
+
+comment:
+  behavior: default
+  require_changes: true
diff --git a/vendor/go.lsp.dev/uri/.gitattributes b/vendor/go.lsp.dev/uri/.gitattributes
new file mode 100644
index 00000000000..cae764e31f6
--- /dev/null
+++ b/vendor/go.lsp.dev/uri/.gitattributes
@@ -0,0 +1,11 @@
+# go.lsp.dev/uri project gitattributes file
+# https://github.com/github/linguist#using-gitattributes
+# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml
+
+# To prevent CRLF breakages on Windows for fragile files, like testdata.
+* -text + +docs/ linguist-documentation +*.pb.go linguist-generated +*_gen.go linguist-generated +*_string.go linguist-generated diff --git a/vendor/go.lsp.dev/uri/.gitignore b/vendor/go.lsp.dev/uri/.gitignore new file mode 100644 index 00000000000..04497949dff --- /dev/null +++ b/vendor/go.lsp.dev/uri/.gitignore @@ -0,0 +1,49 @@ +# go.lsp.dev/uri project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +# please do not open a pull request to add something created by your editor or tools + +# github/gitignore/Go.gitignore +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +# cgo generated +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +# test generated +_testmain.go + +# profile +*.pprof + +# coverage +coverage.* diff --git a/vendor/go.lsp.dev/uri/.golangci.yml b/vendor/go.lsp.dev/uri/.golangci.yml new file mode 100644 index 00000000000..14d15c23517 --- /dev/null +++ b/vendor/go.lsp.dev/uri/.golangci.yml @@ -0,0 +1,151 @@ +run: + issues-exit-code: 1 + tests: true + skip-dirs: + - "vendor$" + skip-files: + - ".*\\.pb\\.go" + - ".*(.|_)gen\\.go" + modules-download-mode: vendor + +linters-settings: + dupl: + threshold: 400 + errcheck: + check-type-assertions: true + check-blank: true + # exclude: .errcheckignore + funlen: + lines: 80 + statements: 40 + goconst: + min-len: 3 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + settings: + hugeParam: + sizeThreshold: 80 + rangeExprCopy: + sizeThreshold: 512 + rangeValCopy: + sizeThreshold: 128 + gocyclo: + min-complexity: 10 + gofmt: + simplify: true + goimports: + local-prefixes: go.lsp.dev/uri + golint: + min-confidence: 0.3 + govet: + enable: + - asmdecl + - assign + - atomic + - atomicalign + - bools + - buildssa + - buildtag + - cgocall + - composite + - copylock + - ctrlflow + - deepequalerrors + - errorsas + - findcall + - httpresponse + - inspect + - loopclosure + - lostcancel + - nilfunc + - nilness + - pkgfact + - printf + - shift + - sortslice + - stdmethods + - structtag + - tests + - unmarshal + - unreachable + - unsafeptr + - unusedresult + disable: + - shadow + lll: + line-length: 180 + tab-width: 1 + maligned: + suggest-new: false + misspell: + locale: US + nakedret: + max-func-lines: 30 + prealloc: + simple: true + range-loops: true + for-loops: false + unparam: + algo: cha + check-exported: true + unused: + check-exported: false + +linters: + # disabled: + # - funlen + # - gochecknoglobals + # - gochecknoinits + # - gocyclo + # - godox + # - gomnd + # - maligned + # - megacheck + # - scopelint + # - wsl + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - gocognit + - goconst + - gocritic + - gofmt + - goimports + - golint + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - lll + - misspell + - nakedret + - prealloc + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace 
+ +issues: + exclude-use-default: true + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true diff --git a/vendor/go.lsp.dev/uri/LICENSE b/vendor/go.lsp.dev/uri/LICENSE new file mode 100644 index 00000000000..e8748709cfb --- /dev/null +++ b/vendor/go.lsp.dev/uri/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, The Go Language Server Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.lsp.dev/uri/Makefile b/vendor/go.lsp.dev/uri/Makefile new file mode 100644 index 00000000000..f4aeab60dfb --- /dev/null +++ b/vendor/go.lsp.dev/uri/Makefile @@ -0,0 +1,15 @@ +# ---------------------------------------------------------------------------- +# global + +.DEFAULT_GOAL = test + +# ---------------------------------------------------------------------------- +# target + +# ---------------------------------------------------------------------------- +# include + +include hack/make/go.mk + +# ---------------------------------------------------------------------------- +# overlays diff --git a/vendor/go.lsp.dev/uri/README.md b/vendor/go.lsp.dev/uri/README.md new file mode 100644 index 00000000000..5d8909cc3b7 --- /dev/null +++ b/vendor/go.lsp.dev/uri/README.md @@ -0,0 +1,19 @@ +# uri + +[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga] + +Package uri is an implementation of the URI Uniform Resource Identifier(RFC3986) specification for Go. 
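+
+A minimal usage sketch (an editorial illustration, not from the upstream README), assuming a Unix-like filesystem:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.lsp.dev/uri"
+)
+
+func main() {
+	u := uri.File("/tmp/example.go") // file:///tmp/example.go
+	fmt.Println(u.Filename())        // /tmp/example.go
+}
+```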
+ + + +[circleci]: https://app.circleci.com/pipelines/github/go-language-server/uri +[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/uri +[module]: https://github.com/go-language-server/uri/releases/latest +[codecov]: https://codecov.io/gh/go-language-server/uri +[ga]: https://github.com/go-language-server/uri + +[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/uri/master.svg?style=for-the-badge&label=CIRCLECI&logo=circleci +[pkg.go.dev-badge]: https://bit.ly/shields-io-pkg-go-dev +[module-badge]: https://img.shields.io/github/release/go-language-server/uri.svg?color=00add8&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4= +[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/uri/master?logo=codecov&style=for-the-badge +[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/uri?useReferer&pixel diff --git a/vendor/go.lsp.dev/uri/doc.go b/vendor/go.lsp.dev/uri/doc.go new file mode 100644 index 00000000000..3d9d72e0756 --- /dev/null +++ b/vendor/go.lsp.dev/uri/doc.go @@ -0,0 +1,6 @@ +// Copyright 2019 The Go Language Server Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uri is an implementation of the URI Uniform Resource Identifier(RFC3986) specification for Go. +package uri // import "go.lsp.dev/uri" diff --git a/vendor/go.lsp.dev/uri/uri.go b/vendor/go.lsp.dev/uri/uri.go new file mode 100644 index 00000000000..32fb3135875 --- /dev/null +++ b/vendor/go.lsp.dev/uri/uri.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Language Server Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uri + +import ( + "errors" + "fmt" + "net/url" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +const ( + // FileScheme schema of filesystem path. + FileScheme = "file" + + // HTTPScheme schema of http. + HTTPScheme = "http" + + // HTTPSScheme schema of https. + HTTPSScheme = "https" +) + +const ( + hierPart = "://" +) + +// URI Uniform Resource Identifier (URI) https://tools.ietf.org/html/rfc3986. +// +// This class is a simple parser which creates the basic component parts +// (http://tools.ietf.org/html/rfc3986#section-3) with minimal validation +// and encoding. +// +// foo://example.com:8042/over/there?name=ferret#nose +// \_/ \______________/\_________/ \_________/ \__/ +// | | | | | +// scheme authority path query fragment +// | _____________________|__ +// / \ / \ +// urn:example:animal:ferret:nose +type URI string + +// Filename returns the file path for the given URI. +// It is an error to call this on a URI that is not a valid filename. +func (u URI) Filename() string { + filename, err := filename(u) + if err != nil { + panic(err) + } + + return filepath.FromSlash(filename) +} + +func filename(uri URI) (string, error) { + u, err := url.ParseRequestURI(string(uri)) + if err != nil { + return "", fmt.Errorf("failed to parse request URI: %w", err) + } + + if u.Scheme != FileScheme { + return "", fmt.Errorf("only file URIs are supported, got %v", u.Scheme) + } + + if isWindowsDriveURI(u.Path) { + u.Path = u.Path[1:] + } + + return u.Path, nil +} + +// New parses and creates a new URI from s. +func New(s string) URI { + if u, err := url.PathUnescape(s); err == nil { + s = u + } + + if strings.HasPrefix(s, FileScheme+hierPart) { + return URI(s) + } + + return File(s) +} + +// File parses and creates a new filesystem URI from path. +func File(path string) URI { + const goRootPragma = "$GOROOT" + if len(path) >= len(goRootPragma) && strings.EqualFold(goRootPragma, path[:len(goRootPragma)]) { + path = runtime.GOROOT() + path[len(goRootPragma):] + } + + if !isWindowsDrivePath(path) { + if abs, err := filepath.Abs(path); err == nil { + path = abs + } + } + + if isWindowsDrivePath(path) { + path = "/" + path + } + + path = filepath.ToSlash(path) + u := url.URL{ + Scheme: FileScheme, + Path: path, + } + + return URI(u.String()) +} + +// Parse parses and creates a new URI from s. +func Parse(s string) (u URI, err error) { + us, err := url.Parse(s) + if err != nil { + return u, fmt.Errorf("url.Parse: %w", err) + } + + switch us.Scheme { + case FileScheme: + ut := url.URL{ + Scheme: FileScheme, + Path: us.Path, + RawPath: filepath.FromSlash(us.Path), + } + u = URI(ut.String()) + + case HTTPScheme, HTTPSScheme: + ut := url.URL{ + Scheme: us.Scheme, + Host: us.Host, + Path: us.Path, + RawQuery: us.Query().Encode(), + Fragment: us.Fragment, + } + u = URI(ut.String()) + + default: + return u, errors.New("unknown scheme") + } + + return +} + +// From returns the new URI from args. 
+func From(scheme, authority, path, query, fragment string) URI { + switch scheme { + case FileScheme: + u := url.URL{ + Scheme: FileScheme, + Path: path, + RawPath: filepath.FromSlash(path), + } + return URI(u.String()) + + case HTTPScheme, HTTPSScheme: + u := url.URL{ + Scheme: scheme, + Host: authority, + Path: path, + RawQuery: url.QueryEscape(query), + Fragment: fragment, + } + return URI(u.String()) + + default: + panic(fmt.Sprintf("unknown scheme: %s", scheme)) + } +} + +// isWindowsDrivePath returns true if the file path is of the form used by Windows. +// +// We check if the path begins with a drive letter, followed by a ":". +func isWindowsDrivePath(path string) bool { + if len(path) < 4 { + return false + } + return unicode.IsLetter(rune(path[0])) && path[1] == ':' +} + +// isWindowsDriveURI returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see https://golang.org/issue/6027). We check if the URI path has +// a drive prefix (e.g. "/C:"). If so, we trim the leading "/". +func isWindowsDriveURI(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go new file mode 100644 index 00000000000..aa02eeda680 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" +) + +// An Analyzer describes an analysis function and its options. +type Analyzer struct { + // The Name of the analyzer must be a valid Go identifier + // as it may appear in command-line flags, URLs, and so on. + Name string + + // Doc is the documentation for the analyzer. + // The part before the first "\n\n" is the title + // (no capital or period, max ~60 letters). + Doc string + + // URL holds an optional link to a web page with additional + // documentation for this analyzer. + URL string + + // Flags defines any flags accepted by the analyzer. + // The manner in which these flags are exposed to the user + // depends on the driver which runs the analyzer. + Flags flag.FlagSet + + // Run applies the analyzer to a package. + // It returns an error if the analyzer failed. + // + // On success, the Run function may return a result + // computed by the Analyzer; its type must match ResultType. + // The driver makes this result available as an input to + // another Analyzer that depends directly on this one (see + // Requires) when it analyzes the same package. + // + // To pass analysis results between packages (and thus + // potentially between address spaces), use Facts, which are + // serializable. + Run func(*Pass) (interface{}, error) + + // RunDespiteErrors allows the driver to invoke + // the Run method of this analyzer even on a + // package that contains parse or type errors. + // The Pass.TypeErrors field may consequently be non-empty. + RunDespiteErrors bool + + // Requires is a set of analyzers that must run successfully + // before this one on a given package. This analyzer may inspect + // the outputs produced by each analyzer in Requires. + // The graph over analyzers implied by Requires edges must be acyclic. 
+	//
+	// Requires establishes a "horizontal" dependency between
+	// analysis passes (different analyzers, same package).
+	Requires []*Analyzer
+
+	// ResultType is the type of the optional result of the Run function.
+	ResultType reflect.Type
+
+	// FactTypes indicates that this analyzer imports and exports
+	// Facts of the specified concrete types.
+	// An analyzer that uses facts may assume that its import
+	// dependencies have been similarly analyzed before it runs.
+	// Facts must be pointers.
+	//
+	// FactTypes establishes a "vertical" dependency between
+	// analysis passes (same analyzer, different packages).
+	FactTypes []Fact
+}
+
+func (a *Analyzer) String() string { return a.Name }
+
+// A Pass provides information to the Run function that
+// applies a specific analyzer to a single Go package.
+//
+// It forms the interface between the analysis logic and the driver
+// program, and has both input and output components.
+//
+// As in a compiler, one pass may depend on the result computed by another.
+//
+// The Run function should not call any of the Pass functions concurrently.
+type Pass struct {
+	Analyzer *Analyzer // the identity of the current analyzer
+
+	// syntax and type information
+	Fset         *token.FileSet // file position information; Run may add new files
+	Files        []*ast.File    // the abstract syntax tree of each file
+	OtherFiles   []string       // names of non-Go files of this package
+	IgnoredFiles []string       // names of ignored source files in this package
+	Pkg          *types.Package // type information about the package
+	TypesInfo    *types.Info    // type information about the syntax trees
+	TypesSizes   types.Sizes    // function for computing sizes of types
+	TypeErrors   []types.Error  // type errors (only if Analyzer.RunDespiteErrors)
+
+	Module *Module // the package's enclosing module (possibly nil in some drivers)
+
+	// Report reports a Diagnostic, a finding about a specific location
+	// in the analyzed source code such as a potential mistake.
+	// It may be called by the Run function.
+	Report func(Diagnostic)
+
+	// ResultOf provides the inputs to this analysis pass, which are
+	// the corresponding results of its prerequisite analyzers.
+	// The map keys are the elements of Analyzer.Requires,
+	// and the type of each corresponding value is the required
+	// analysis's ResultType.
+	ResultOf map[*Analyzer]interface{}
+
+	// ReadFile returns the contents of the named file.
+	//
+	// The only valid file names are the elements of OtherFiles
+	// and IgnoredFiles, and names returned by
+	// Fset.File(f.FileStart).Name() for each f in Files.
+	//
+	// Analyzers must use this function (if provided) instead of
+	// accessing the file system directly. This allows a driver to
+	// provide a virtualized file tree (including, for example,
+	// unsaved editor buffers) and to track dependencies precisely
+	// to avoid unnecessary recomputation.
+	ReadFile func(filename string) ([]byte, error)
+
+	// -- facts --
+
+	// ImportObjectFact retrieves a fact associated with obj.
+	// Given a value ptr of type *T, where *T satisfies Fact,
+	// ImportObjectFact copies the value to *ptr.
+	//
+	// ImportObjectFact panics if called after the pass is complete.
+	// ImportObjectFact is not concurrency-safe.
+	ImportObjectFact func(obj types.Object, fact Fact) bool
+
+	// ImportPackageFact retrieves a fact associated with package pkg,
+	// which must be this package or one of its dependencies.
+	// See comments for ImportObjectFact.
+ ImportPackageFact func(pkg *types.Package, fact Fact) bool + + // ExportObjectFact associates a fact of type *T with the obj, + // replacing any previous fact of that type. + // + // ExportObjectFact panics if it is called after the pass is + // complete, or if obj does not belong to the package being analyzed. + // ExportObjectFact is not concurrency-safe. + ExportObjectFact func(obj types.Object, fact Fact) + + // ExportPackageFact associates a fact with the current package. + // See comments for ExportObjectFact. + ExportPackageFact func(fact Fact) + + // AllPackageFacts returns a new slice containing all package + // facts of the analysis's FactTypes in unspecified order. + AllPackageFacts func() []PackageFact + + // AllObjectFacts returns a new slice containing all object + // facts of the analysis's FactTypes in unspecified order. + AllObjectFacts func() []ObjectFact + + /* Further fields may be added in future. */ +} + +// PackageFact is a package together with an associated fact. +type PackageFact struct { + Package *types.Package + Fact Fact +} + +// ObjectFact is an object together with an associated fact. +type ObjectFact struct { + Object types.Object + Fact Fact +} + +// Reportf is a helper function that reports a Diagnostic using the +// specified position and formatted error message. +func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: pos, Message: msg}) +} + +// The Range interface provides a range. It's equivalent to and satisfied by +// ast.Node. +type Range interface { + Pos() token.Pos // position of first character belonging to the node + End() token.Pos // position of first character immediately after the node +} + +// ReportRangef is a helper function that reports a Diagnostic using the +// range provided. ast.Node values can be passed in as the range because +// they satisfy the Range interface. +func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) +} + +func (pass *Pass) String() string { + return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path()) +} + +// A Fact is an intermediate fact produced during analysis. +// +// Each fact is associated with a named declaration (a types.Object) or +// with a package as a whole. A single object or package may have +// multiple associated facts, but only one of any particular fact type. +// +// A Fact represents a predicate such as "never returns", but does not +// represent the subject of the predicate such as "function F" or "package P". +// +// Facts may be produced in one analysis pass and consumed by another +// analysis pass even if these are in different address spaces. +// If package P imports Q, all facts about Q produced during +// analysis of that package will be available during later analysis of P. +// Facts are analogous to type export data in a build system: +// just as export data enables separate compilation of several passes, +// facts enable "separate analysis". +// +// Each pass (a, p) starts with the set of facts produced by the +// same analyzer a applied to the packages directly imported by p. +// The analysis may add facts to the set, and they may be exported in turn. +// An analysis's Run function may retrieve facts by calling +// Pass.Import{Object,Package}Fact and update them using +// Pass.Export{Object,Package}Fact. 
+// +// A fact is logically private to its Analysis. To pass values +// between different analyzers, use the results mechanism; +// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf. +// +// A Fact type must be a pointer. +// Facts are encoded and decoded using encoding/gob. +// A Fact may implement the GobEncoder/GobDecoder interfaces +// to customize its encoding. Fact encoding should not fail. +// +// A Fact should not be modified once exported. +type Fact interface { + AFact() // dummy method to avoid type errors +} + +// A Module describes the module to which a package belongs. +type Module struct { + Path string // module path + Version string // module version ("" if unknown, such as for workspace modules) + GoVersion string // go version used in module (e.g. "go1.22.0") +} diff --git a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go new file mode 100644 index 00000000000..c1b2dd4fa1b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go @@ -0,0 +1,695 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysistest provides utilities for testing analyzers. +package analysistest + +import ( + "bytes" + "fmt" + "go/format" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "testing" + "text/scanner" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/txtar" +) + +// WriteFiles is a helper function that creates a temporary directory +// and populates it with a GOPATH-style project using filemap (which +// maps file names to contents). On success it returns the name of the +// directory and a cleanup function to delete it. +func WriteFiles(filemap map[string]string) (dir string, cleanup func(), err error) { + gopath, err := os.MkdirTemp("", "analysistest") + if err != nil { + return "", nil, err + } + cleanup = func() { os.RemoveAll(gopath) } + + for name, content := range filemap { + filename := filepath.Join(gopath, "src", name) + os.MkdirAll(filepath.Dir(filename), 0777) // ignore error + if err := os.WriteFile(filename, []byte(content), 0666); err != nil { + cleanup() + return "", nil, err + } + } + return gopath, cleanup, nil +} + +// TestData returns the effective filename of +// the program's "testdata" directory. +// This function may be overridden by projects using +// an alternative build system (such as Blaze) that +// does not run a test in its package directory. +var TestData = func() string { + testdata, err := filepath.Abs("testdata") + if err != nil { + log.Fatal(err) + } + return testdata +} + +// Testing is an abstraction of a *testing.T. +type Testing interface { + Errorf(format string, args ...interface{}) +} + +// RunWithSuggestedFixes behaves like Run, but additionally verifies suggested fixes. +// It uses golden files placed alongside the source code under analysis: +// suggested fixes for code in example.go will be compared against example.go.golden. +// +// Golden files can be formatted in one of two ways: as plain Go source code, or as txtar archives. 
+// In the first case, all suggested fixes will be applied to the original source, which will then be compared against the golden file. +// In the second case, suggested fixes will be grouped by their messages, and each set of fixes will be applied and tested separately. +// Each section in the archive corresponds to a single message. +// +// A golden file using txtar may look like this: +// +// -- turn into single negation -- +// package pkg +// +// func fn(b1, b2 bool) { +// if !b1 { // want `negating a boolean twice` +// println() +// } +// } +// +// -- remove double negation -- +// package pkg +// +// func fn(b1, b2 bool) { +// if b1 { // want `negating a boolean twice` +// println() +// } +// } +// +// # Conflicts +// +// A single analysis pass may offer two or more suggested fixes that +// (1) conflict but are nonetheless logically composable, (e.g. +// because both update the import declaration), or (2) are +// fundamentally incompatible (e.g. alternative fixes to the same +// statement). +// +// It is up to the driver to decide how to apply such fixes. A +// sophisticated driver could attempt to resolve conflicts of the +// first kind, but this test driver simply reports the fact of the +// conflict with the expectation that the user will split their tests +// into nonconflicting parts. +// +// Conflicts of the second kind can be avoided by giving the +// alternative fixes different names (SuggestedFix.Message) and +// defining the .golden file as a multi-section txtar file with a +// named section for each alternative fix, as shown above. +// +// Analyzers that compute fixes from a textual diff of the +// before/after file contents (instead of directly from syntax tree +// positions) may produce fixes that, although logically +// non-conflicting, nonetheless conflict due to the particulars of the +// diff algorithm. In such cases it may suffice to introduce +// sufficient separation of the statements in the test input so that +// the computed diffs do not overlap. If that fails, break the test +// into smaller parts. +// +// TODO(adonovan): the behavior of RunWithSuggestedFixes as documented +// above is impractical for tests that report multiple diagnostics and +// offer multiple alternative fixes for the same diagnostic, and it is +// inconsistent with the interpretation of multiple diagnostics +// described at Diagnostic.SuggestedFixes. +// We need to rethink the analyzer testing API to better support such +// cases. In the meantime, users of RunWithSuggestedFixes testing +// analyzers that offer alternative fixes are advised to put each fix +// in a separate .go file in the testdata. +func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result { + r := Run(t, dir, a, patterns...) + + // If the immediate caller of RunWithSuggestedFixes is in + // x/tools, we apply stricter checks as required by gopls. + inTools := false + { + var pcs [1]uintptr + n := runtime.Callers(1, pcs[:]) + frames := runtime.CallersFrames(pcs[:n]) + fr, _ := frames.Next() + if fr.Func != nil && strings.HasPrefix(fr.Func.Name(), "golang.org/x/tools/") { + inTools = true + } + } + + // Process each result (package) separately, matching up the suggested + // fixes into a diff, which we will compare to the .golden file. We have + // to do this per-result in case a file appears in two packages, such as in + // packages with tests, where mypkg/a.go will appear in both mypkg and + // mypkg.test. 
In that case, the analyzer may suggest the same set of
+	// changes to a.go for each package. If we merge all the results, those
+	// changes get doubly applied, which will cause conflicts or mismatches.
+	// Validating the results separately means as long as the two analyses
+	// don't produce conflicting suggestions for a single file, everything
+	// should match up.
+	for _, act := range r {
+		// file -> message -> edits
+		fileEdits := make(map[*token.File]map[string][]diff.Edit)
+		fileContents := make(map[*token.File][]byte)
+
+		// Validate edits, prepare the fileEdits map and read the file contents.
+		for _, diag := range act.Diagnostics {
+			for _, fix := range diag.SuggestedFixes {
+
+				// Assert that lazy fixes have a Category (#65578, #65087).
+				if inTools && len(fix.TextEdits) == 0 && diag.Category == "" {
+					t.Errorf("missing Diagnostic.Category for SuggestedFix without TextEdits (gopls requires the category for the name of the fix command)")
+				}
+
+				for _, edit := range fix.TextEdits {
+					start, end := edit.Pos, edit.End
+					if !end.IsValid() {
+						end = start
+					}
+					// Validate the edit.
+					if start > end {
+						t.Errorf(
+							"diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)",
+							act.Pass.Analyzer.Name, start, end)
+						continue
+					}
+					file, endfile := act.Pass.Fset.File(start), act.Pass.Fset.File(end)
+					if file == nil || endfile == nil || file != endfile {
+						t.Errorf(
+							"diagnostic for analysis %v contains Suggested Fix with malformed spanning files %v and %v",
+							act.Pass.Analyzer.Name, file.Name(), endfile.Name())
+						continue
+					}
+					if _, ok := fileContents[file]; !ok {
+						contents, err := os.ReadFile(file.Name())
+						if err != nil {
+							t.Errorf("error reading %s: %v", file.Name(), err)
+						}
+						fileContents[file] = contents
+					}
+					if _, ok := fileEdits[file]; !ok {
+						fileEdits[file] = make(map[string][]diff.Edit)
+					}
+					fileEdits[file][fix.Message] = append(fileEdits[file][fix.Message], diff.Edit{
+						Start: file.Offset(start),
+						End:   file.Offset(end),
+						New:   string(edit.NewText),
+					})
+				}
+			}
+		}
+
+		for file, fixes := range fileEdits {
+			// Get the original file contents.
+			orig, ok := fileContents[file]
+			if !ok {
+				t.Errorf("could not find file contents for %s", file.Name())
+				continue
+			}
+
+			// Get the golden file and read the contents.
+			ar, err := txtar.ParseFile(file.Name() + ".golden")
+			if err != nil {
+				t.Errorf("error reading %s.golden: %v", file.Name(), err)
+				continue
+			}
+
+			if len(ar.Files) > 0 {
+				// one virtual file per kind of suggested fix
+
+				if len(ar.Comment) != 0 {
+					// we allow either just the comment, or just virtual
+					// files, not both. it is not clear how "both" should
+					// behave.
+					t.Errorf("%s.golden has leading comment; we don't know what to do with it", file.Name())
+					continue
+				}
+
+				for sf, edits := range fixes {
+					found := false
+					for _, vf := range ar.Files {
+						if vf.Name == sf {
+							found = true
+							// the file may contain multiple trailing
+							// newlines if the user places empty lines
+							// between files in the archive. normalize
+							// this to a single newline.
+							golden := append(bytes.TrimRight(vf.Data, "\n"), '\n')
+
+							if err := applyDiffsAndCompare(orig, golden, edits, file.Name()); err != nil {
+								t.Errorf("%s", err)
+							}
+							break
+						}
+					}
+					if !found {
+						t.Errorf("no section for suggested fix %q in %s.golden", sf, file.Name())
+					}
+				}
+			} else {
+				// all suggested fixes are represented by a single file
+
+				var catchallEdits []diff.Edit
+				for _, edits := range fixes {
+					catchallEdits = append(catchallEdits, edits...)
+				}
+
+				if err := applyDiffsAndCompare(orig, ar.Comment, catchallEdits, file.Name()); err != nil {
+					t.Errorf("%s", err)
+				}
+			}
+		}
+	}
+	return r
+}
+
+// applyDiffsAndCompare applies edits to src and compares the results against
+// golden after formatting both. fileName is used solely for error reporting.
+func applyDiffsAndCompare(src, golden []byte, edits []diff.Edit, fileName string) error {
+	out, err := diff.ApplyBytes(src, edits)
+	if err != nil {
+		return fmt.Errorf("%s: error applying fixes: %v (see possible explanations at RunWithSuggestedFixes)", fileName, err)
+	}
+	wantRaw, err := format.Source(golden)
+	if err != nil {
+		return fmt.Errorf("%s.golden: error formatting golden file: %v\n%s", fileName, err, out)
+	}
+	want := string(wantRaw)
+
+	formatted, err := format.Source(out)
+	if err != nil {
+		return fmt.Errorf("%s: error formatting resulting source: %v\n%s", fileName, err, out)
+	}
+	if got := string(formatted); got != want {
+		unified := diff.Unified(fileName+".golden", "actual", want, got)
+		return fmt.Errorf("suggested fixes failed for %s:\n%s", fileName, unified)
+	}
+	return nil
+}
+
+// Run applies an analysis to the packages denoted by the "go list" patterns.
+//
+// It loads the packages from the specified
+// directory using golang.org/x/tools/go/packages, runs the analysis on
+// them, and checks that each analysis emits the expected diagnostics
+// and facts specified by the contents of '// want ...' comments in the
+// package's source files. It treats a comment of the form
+// "//...// want..." or "/*...// want... */" as if it starts at 'want'.
+//
+// If the directory contains a go.mod file, Run treats it as the root of the
+// Go module in which to work. Otherwise, Run treats it as the root of a
+// GOPATH-style tree, with packages contained in the src subdirectory.
+//
+// An expectation of a Diagnostic is specified by a string literal
+// containing a regular expression that must match the diagnostic
+// message. For example:
+//
+//	fmt.Printf("%s", 1) // want `cannot provide int 1 to %s`
+//
+// An expectation of a Fact associated with an object is specified by
+// 'name:"pattern"', where name is the name of the object, which must be
+// declared on the same line as the comment, and pattern is a regular
+// expression that must match the string representation of the fact,
+// fmt.Sprint(fact). For example:
+//
+//	func panicf(format string, args interface{}) { // want panicf:"printfWrapper"
+//
+// Package facts are specified by the name "package" and appear on
+// line 1 of the first source file of the package.
+//
+// A single 'want' comment may contain a mixture of diagnostic and fact
+// expectations, including multiple facts about the same object:
+//
+//	// want "diag" "diag2" x:"fact1" x:"fact2" y:"fact3"
+//
+// Unexpected diagnostics and facts, and unmatched expectations, are
+// reported as errors to the Testing.
+//
+// Run reports an error to the Testing if loading or analysis failed.
+// Run also returns a Result for each package for which analysis was
+// attempted, even if unsuccessful. It is safe for a test to ignore all
+// the results, but a test may use it to perform additional checks.
+func Run(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result {
+	if t, ok := t.(testing.TB); ok {
+		testenv.NeedsGoPackages(t)
+	}
+
+	pkgs, err := loadPackages(a, dir, patterns...)
+	if err != nil {
+		t.Errorf("loading %s: %v", patterns, err)
+		return nil
+	}
+
+	if err := analysis.Validate([]*analysis.Analyzer{a}); err != nil {
+		t.Errorf("Validate: %v", err)
+		return nil
+	}
+
+	results := checker.TestAnalyzer(a, pkgs)
+	for _, result := range results {
+		if result.Err != nil {
+			t.Errorf("error analyzing %s: %v", result.Pass, result.Err)
+		} else {
+			check(t, dir, result.Pass, result.Diagnostics, result.Facts)
+		}
+	}
+	return results
+}
+
+// A Result holds the result of applying an analyzer to a package.
+type Result = checker.TestAnalyzerResult
+
+// loadPackages uses go/packages to load the specified packages (from source, with
+// dependencies) from dir, which is the root of a GOPATH-style project tree.
+// loadPackages returns an error if any package had an error, or the pattern
+// matched no packages.
+func loadPackages(a *analysis.Analyzer, dir string, patterns ...string) ([]*packages.Package, error) {
+	env := []string{"GOPATH=" + dir, "GO111MODULE=off", "GOWORK=off"} // GOPATH mode
+
+	// Undocumented module mode. Will be replaced by something better.
+	if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
+		gowork := filepath.Join(dir, "go.work")
+		if _, err := os.Stat(gowork); err != nil {
+			gowork = "off"
+		}
+
+		env = []string{"GO111MODULE=on", "GOPROXY=off", "GOWORK=" + gowork} // module mode
+	}
+
+	// packages.Load loads the real standard library, not a minimal
+	// fake version, which would be more efficient, especially if we
+	// have many small tests that import, say, net/http.
+	// However, there is no easy way to make go/packages consume
+	// a list of packages we generate and then do the parsing and
+	// typechecking, though this feature seems to be a recurring need.
+
+	mode := packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedImports |
+		packages.NeedTypes | packages.NeedTypesSizes | packages.NeedSyntax | packages.NeedTypesInfo |
+		packages.NeedDeps | packages.NeedModule
+	cfg := &packages.Config{
+		Mode:  mode,
+		Dir:   dir,
+		Tests: true,
+		Env:   append(os.Environ(), env...),
+	}
+	pkgs, err := packages.Load(cfg, patterns...)
+	if err != nil {
+		return nil, err
+	}
+
+	// If any named package couldn't be loaded at all
+	// (e.g. the Name field is unset), fail fast.
+	for _, pkg := range pkgs {
+		if pkg.Name == "" {
+			return nil, fmt.Errorf("failed to load %q: Errors=%v",
+				pkg.PkgPath, pkg.Errors)
+		}
+	}
+
+	// Do NOT print errors if the analyzer will continue running.
+	// It is incredibly confusing for tests to be printing to stderr
+	// willy-nilly instead of their test logs, especially when the
+	// errors are expected and are going to be fixed.
+	if !a.RunDespiteErrors {
+		if packages.PrintErrors(pkgs) > 0 {
+			return nil, fmt.Errorf("there were package loading errors (and RunDespiteErrors is false)")
+		}
+	}
+
+	if len(pkgs) == 0 {
+		return nil, fmt.Errorf("no packages matched %s", patterns)
+	}
+	return pkgs, nil
+}
+
+// check inspects an analysis pass on which the analysis has already
+// been run, and verifies that all reported diagnostics and facts match
+// those specified by the contents of "// want ..." comments in the
+// package's source files, which must have been parsed with comments enabled.
+func check(t Testing, gopath string, pass *analysis.Pass, diagnostics []analysis.Diagnostic, facts map[types.Object][]analysis.Fact) {
+	type key struct {
+		file string
+		line int
+	}
+
+	want := make(map[key][]expectation)
+
+	// processComment parses expectations out of comments.
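+	// For example (an illustrative sketch, not taken from this package's
+	// testdata), each line below carries one expectation:
+	//
+	//	f() // want `unchecked error`                   (diagnostic on this line)
+	//	g() // want +1 "pattern"                        (diagnostic on the next line)
+	//	func panicf() {} // want panicf:"printfWrapper" (fact about object panicf)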
+	processComment := func(filename string, linenum int, text string) {
+		text = strings.TrimSpace(text)
+
+		// Any comment starting with "want" is treated
+		// as an expectation, even without following whitespace.
+		if rest := strings.TrimPrefix(text, "want"); rest != text {
+			lineDelta, expects, err := parseExpectations(rest)
+			if err != nil {
+				t.Errorf("%s:%d: in 'want' comment: %s", filename, linenum, err)
+				return
+			}
+			if expects != nil {
+				want[key{filename, linenum + lineDelta}] = expects
+			}
+		}
+	}
+
+	// Extract 'want' comments from parsed Go files.
+	for _, f := range pass.Files {
+		for _, cgroup := range f.Comments {
+			for _, c := range cgroup.List {
+
+				text := strings.TrimPrefix(c.Text, "//")
+				if text == c.Text { // not a //-comment.
+					text = strings.TrimPrefix(text, "/*")
+					text = strings.TrimSuffix(text, "*/")
+				}
+
+				// Hack: treat a comment of the form "//...// want..."
+				// or "/*...// want... */"
+				// as if it starts at 'want'.
+				// This allows us to add comments on comments,
+				// as required when testing the buildtag analyzer.
+				if i := strings.Index(text, "// want"); i >= 0 {
+					text = text[i+len("// "):]
+				}
+
+				// It's tempting to compute the filename
+				// once outside the loop, but it's
+				// incorrect because it can change due
+				// to //line directives.
+				posn := pass.Fset.Position(c.Pos())
+				filename := sanitize(gopath, posn.Filename)
+				processComment(filename, posn.Line, text)
+			}
+		}
+	}
+
+	// Extract 'want' comments from non-Go files.
+	// TODO(adonovan): we may need to handle //line directives.
+	for _, filename := range pass.OtherFiles {
+		data, err := os.ReadFile(filename)
+		if err != nil {
+			t.Errorf("can't read '// want' comments from %s: %v", filename, err)
+			continue
+		}
+		filename := sanitize(gopath, filename)
+		linenum := 0
+		for _, line := range strings.Split(string(data), "\n") {
+			linenum++
+
+			// Hack: treat a comment of the form "//...// want..."
+			// or "/*...// want... */"
+			// as if it starts at 'want'.
+			// This allows us to add comments on comments,
+			// as required when testing the buildtag analyzer.
+			if i := strings.Index(line, "// want"); i >= 0 {
+				line = line[i:]
+			}
+
+			if i := strings.Index(line, "//"); i >= 0 {
+				line = line[i+len("//"):]
+				processComment(filename, linenum, line)
+			}
+		}
+	}
+
+	checkMessage := func(posn token.Position, kind, name, message string) {
+		posn.Filename = sanitize(gopath, posn.Filename)
+		k := key{posn.Filename, posn.Line}
+		expects := want[k]
+		var unmatched []string
+		for i, exp := range expects {
+			if exp.kind == kind && exp.name == name {
+				if exp.rx.MatchString(message) {
+					// matched: remove the expectation.
+					expects[i] = expects[len(expects)-1]
+					expects = expects[:len(expects)-1]
+					want[k] = expects
+					return
+				}
+				unmatched = append(unmatched, fmt.Sprintf("%#q", exp.rx))
+			}
+		}
+		if unmatched == nil {
+			t.Errorf("%v: unexpected %s: %v", posn, kind, message)
+		} else {
+			t.Errorf("%v: %s %q does not match pattern %s",
+				posn, kind, message, strings.Join(unmatched, " or "))
+		}
+	}
+
+	// Check the diagnostics match expectations.
+	for _, f := range diagnostics {
+		// TODO(matloob): Support ranges in analysistest.
+		posn := pass.Fset.Position(f.Pos)
+		checkMessage(posn, "diagnostic", "", f.Message)
+	}
+
+	// Check the facts match expectations.
+	// Report errors in lexical order for determinism.
+	// (It's only deterministic within each file, not across files,
+	// because go/packages does not guarantee file.Pos is ascending
+	// across the files of a single compilation unit.)
+ var objects []types.Object + for obj := range facts { + objects = append(objects, obj) + } + sort.Slice(objects, func(i, j int) bool { + // Package facts compare less than object facts. + ip, jp := objects[i] == nil, objects[j] == nil // whether i, j is a package fact + if ip != jp { + return ip && !jp + } + return objects[i].Pos() < objects[j].Pos() + }) + for _, obj := range objects { + var posn token.Position + var name string + if obj != nil { + // Object facts are reported on the declaring line. + name = obj.Name() + posn = pass.Fset.Position(obj.Pos()) + } else { + // Package facts are reported at the start of the file. + name = "package" + posn = pass.Fset.Position(pass.Files[0].Pos()) + posn.Line = 1 + } + + for _, fact := range facts[obj] { + checkMessage(posn, "fact", name, fmt.Sprint(fact)) + } + } + + // Reject surplus expectations. + // + // Sometimes an Analyzer reports two similar diagnostics on a + // line with only one expectation. The reader may be confused by + // the error message. + // TODO(adonovan): print a better error: + // "got 2 diagnostics here; each one needs its own expectation". + var surplus []string + for key, expects := range want { + for _, exp := range expects { + err := fmt.Sprintf("%s:%d: no %s was reported matching %#q", key.file, key.line, exp.kind, exp.rx) + surplus = append(surplus, err) + } + } + sort.Strings(surplus) + for _, err := range surplus { + t.Errorf("%s", err) + } +} + +type expectation struct { + kind string // either "fact" or "diagnostic" + name string // name of object to which fact belongs, or "package" ("fact" only) + rx *regexp.Regexp +} + +func (ex expectation) String() string { + return fmt.Sprintf("%s %s:%q", ex.kind, ex.name, ex.rx) // for debugging +} + +// parseExpectations parses the content of a "// want ..." comment +// and returns the expectations, a mixture of diagnostics ("rx") and +// facts (name:"rx"). +func parseExpectations(text string) (lineDelta int, expects []expectation, err error) { + var scanErr string + sc := new(scanner.Scanner).Init(strings.NewReader(text)) + sc.Error = func(s *scanner.Scanner, msg string) { + scanErr = msg // e.g. 
bad string escape + } + sc.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanInts + + scanRegexp := func(tok rune) (*regexp.Regexp, error) { + if tok != scanner.String && tok != scanner.RawString { + return nil, fmt.Errorf("got %s, want regular expression", + scanner.TokenString(tok)) + } + pattern, _ := strconv.Unquote(sc.TokenText()) // can't fail + return regexp.Compile(pattern) + } + + for { + tok := sc.Scan() + switch tok { + case '+': + tok = sc.Scan() + if tok != scanner.Int { + return 0, nil, fmt.Errorf("got +%s, want +Int", scanner.TokenString(tok)) + } + lineDelta, _ = strconv.Atoi(sc.TokenText()) + case scanner.String, scanner.RawString: + rx, err := scanRegexp(tok) + if err != nil { + return 0, nil, err + } + expects = append(expects, expectation{"diagnostic", "", rx}) + + case scanner.Ident: + name := sc.TokenText() + tok = sc.Scan() + if tok != ':' { + return 0, nil, fmt.Errorf("got %s after %s, want ':'", + scanner.TokenString(tok), name) + } + tok = sc.Scan() + rx, err := scanRegexp(tok) + if err != nil { + return 0, nil, err + } + expects = append(expects, expectation{"fact", name, rx}) + + case scanner.EOF: + if scanErr != "" { + return 0, nil, fmt.Errorf("%s", scanErr) + } + return lineDelta, expects, nil + + default: + return 0, nil, fmt.Errorf("unexpected %s", scanner.TokenString(tok)) + } + } +} + +// sanitize removes the GOPATH portion of the filename, +// typically a gnarly /tmp directory, and returns the rest. +func sanitize(gopath, filename string) string { + prefix := gopath + string(os.PathSeparator) + "src" + string(os.PathSeparator) + return filepath.ToSlash(strings.TrimPrefix(filename, prefix)) +} diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go new file mode 100644 index 00000000000..ee083a2d686 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import "go/token" + +// A Diagnostic is a message associated with a source location or range. +// +// An Analyzer may return a variety of diagnostics; the optional Category, +// which should be a constant, may be used to classify them. +// It is primarily intended to make it easy to look up documentation. +// +// All Pos values are interpreted relative to Pass.Fset. If End is +// provided, the diagnostic is specified to apply to the range between +// Pos and End. +type Diagnostic struct { + Pos token.Pos + End token.Pos // optional + Category string // optional + Message string + + // URL is the optional location of a web page that provides + // additional documentation for this diagnostic. + // + // If URL is empty but a Category is specified, then the + // Analysis driver should treat the URL as "#"+Category. + // + // The URL may be relative. If so, the base URL is that of the + // Analyzer that produced the diagnostic; + // see https://pkg.go.dev/net/url#URL.ResolveReference. + URL string + + // SuggestedFixes is an optional list of fixes to address the + // problem described by the diagnostic. Each one represents + // an alternative strategy; at most one may be applied. + // + // Fixes for different diagnostics should be treated as + // independent changes to the same baseline file state, + // analogous to a set of git commits all with the same parent. 
+ // Combining fixes requires resolving any conflicts that + // arise, analogous to a git merge. + // Any conflicts that remain may be dealt with, depending on + // the tool, by discarding fixes, consulting the user, or + // aborting the operation. + SuggestedFixes []SuggestedFix + + // Related contains optional secondary positions and messages + // related to the primary diagnostic. + Related []RelatedInformation +} + +// RelatedInformation contains information related to a diagnostic. +// For example, a diagnostic that flags duplicated declarations of a +// variable may include one RelatedInformation per existing +// declaration. +type RelatedInformation struct { + Pos token.Pos + End token.Pos // optional + Message string +} + +// A SuggestedFix is a code change associated with a Diagnostic that a +// user can choose to apply to their code. Usually the SuggestedFix is +// meant to fix the issue flagged by the diagnostic. +// +// The TextEdits must not overlap, nor contain edits for other packages. +type SuggestedFix struct { + // A verb phrase describing the fix, to be shown to + // a user trying to decide whether to accept it. + // + // Example: "Remove the surplus argument" + Message string + TextEdits []TextEdit +} + +// A TextEdit represents the replacement of the code between Pos and End with the new text. +// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. +type TextEdit struct { + // For a pure insertion, End can either be set to Pos or token.NoPos. + Pos token.Pos + End token.Pos + NewText []byte +} diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go new file mode 100644 index 00000000000..2a0aa577126 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package analysis defines the interface between a modular static +analysis and an analysis driver program. + +# Background + +A static analysis is a function that inspects a package of Go code and +reports a set of diagnostics (typically mistakes in the code), and +perhaps produces other results as well, such as suggested refactorings +or other facts. An analysis that reports mistakes is informally called a +"checker". For example, the printf checker reports mistakes in +fmt.Printf format strings. + +A "modular" analysis is one that inspects one package at a time but can +save information from a lower-level package and use it when inspecting a +higher-level package, analogous to separate compilation in a toolchain. +The printf checker is modular: when it discovers that a function such as +log.Fatalf delegates to fmt.Printf, it records this fact, and checks +calls to that function too, including calls made from another package. + +By implementing a common interface, checkers from a variety of sources +can be easily selected, incorporated, and reused in a wide range of +driver programs including command-line tools (such as vet), text editors and +IDEs, build and test systems (such as go build, Bazel, or Buck), test +frameworks, code review tools, code-base indexers (such as SourceGraph), +documentation viewers (such as godoc), batch pipelines for large code +bases, and so on. + +# Analyzer + +The primary type in the API is [Analyzer]. 
An Analyzer statically +describes an analysis function: its name, documentation, flags, +relationship to other analyzers, and of course, its logic. + +To define an analysis, a user declares a (logically constant) variable +of type Analyzer. Here is a typical example from one of the analyzers in +the go/analysis/passes/ subdirectory: + + package unusedresult + + var Analyzer = &analysis.Analyzer{ + Name: "unusedresult", + Doc: "check for unused results of calls to some functions", + Run: run, + ... + } + + func run(pass *analysis.Pass) (interface{}, error) { + ... + } + +An analysis driver is a program such as vet that runs a set of +analyses and prints the diagnostics that they report. +The driver program must import the list of Analyzers it needs. +Typically each Analyzer resides in a separate package. +To add a new Analyzer to an existing driver, add another item to the list: + + import ( "unusedresult"; "nilness"; "printf" ) + + var analyses = []*analysis.Analyzer{ + unusedresult.Analyzer, + nilness.Analyzer, + printf.Analyzer, + } + +A driver may use the name, flags, and documentation to provide on-line +help that describes the analyses it performs. +The doc comment contains a brief one-line summary, +optionally followed by paragraphs of explanation. + +The [Analyzer] type has more fields besides those shown above: + + type Analyzer struct { + Name string + Doc string + Flags flag.FlagSet + Run func(*Pass) (interface{}, error) + RunDespiteErrors bool + ResultType reflect.Type + Requires []*Analyzer + FactTypes []Fact + } + +The Flags field declares a set of named (global) flag variables that +control analysis behavior. Unlike vet, analysis flags are not declared +directly in the command line FlagSet; it is up to the driver to set the +flag variables. A driver for a single analysis, a, might expose its flag +f directly on the command line as -f, whereas a driver for multiple +analyses might prefix the flag name by the analysis name (-a.f) to avoid +ambiguity. An IDE might expose the flags through a graphical interface, +and a batch pipeline might configure them from a config file. +See the "findcall" analyzer for an example of flags in action. + +The RunDespiteErrors flag indicates whether the analysis is equipped to +handle ill-typed code. If not, the driver will skip the analysis if +there were parse or type errors. +The optional ResultType field specifies the type of the result value +computed by this analysis and made available to other analyses. +The Requires field specifies a list of analyses upon which +this one depends and whose results it may access, and it constrains the +order in which a driver may run analyses. +The FactTypes field is discussed in the section on Modularity. +The analysis package provides a Validate function to perform basic +sanity checks on an Analyzer, such as that its Requires graph is +acyclic, its fact and result types are unique, and so on. + +Finally, the Run field contains a function to be called by the driver to +execute the analysis on a single package. The driver passes it an +instance of the Pass type. + +# Pass + +A [Pass] describes a single unit of work: the application of a particular +Analyzer to a particular package of Go code. +The Pass provides information to the Analyzer's Run function about the +package being analyzed, and provides operations to the Run function for +reporting diagnostics and other information back to the driver. 
+ + type Pass struct { + Fset *token.FileSet + Files []*ast.File + OtherFiles []string + IgnoredFiles []string + Pkg *types.Package + TypesInfo *types.Info + ResultOf map[*Analyzer]interface{} + Report func(Diagnostic) + ... + } + +The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees, +type information, and source positions for a single package of Go code. + +The OtherFiles field provides the names of non-Go +files such as assembly that are part of this package. +Similarly, the IgnoredFiles field provides the names of Go and non-Go +source files that are not part of this package with the current build +configuration but may be part of other build configurations. +The contents of these files may be read using Pass.ReadFile; +see the "asmdecl" or "buildtags" analyzers for examples of loading +non-Go files and reporting diagnostics against them. + +The ResultOf field provides the results computed by the analyzers +required by this one, as expressed in its Analyzer.Requires field. The +driver runs the required analyzers first and makes their results +available in this map. Each Analyzer must return a value of the type +described in its Analyzer.ResultType field. +For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which +provides a control-flow graph for each function in the package (see +golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that +enables other Analyzers to traverse the syntax trees of the package more +efficiently; and the "buildssa" analyzer constructs an SSA-form +intermediate representation. +Each of these Analyzers extends the capabilities of later Analyzers +without adding a dependency to the core API, so an analysis tool pays +only for the extensions it needs. + +The Report function emits a diagnostic, a message associated with a +source position. For most analyses, diagnostics are their primary +result. +For convenience, Pass provides a helper method, Reportf, to report a new +diagnostic by formatting a string. +Diagnostic is defined as: + + type Diagnostic struct { + Pos token.Pos + Category string // optional + Message string + } + +The optional Category field is a short identifier that classifies the +kind of message when an analysis produces several kinds of diagnostic. + +The [Diagnostic] struct does not have a field to indicate its severity +because opinions about the relative importance of Analyzers and their +diagnostics vary widely among users. The design of this framework does +not hold each Analyzer responsible for identifying the severity of its +diagnostics. Instead, we expect that drivers will allow the user to +customize the filtering and prioritization of diagnostics based on the +producing Analyzer and optional Category, according to the user's +preferences. + +Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl +and buildtag, inspect the raw text of Go source files or even non-Go +files such as assembly. To report a diagnostic against a line of a +raw text file, use the following sequence: + + content, err := pass.ReadFile(filename) + if err != nil { ... } + tf := fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + ... + pass.Reportf(tf.LineStart(line), "oops") + +# Modular analysis with Facts + +To improve efficiency and scalability, large programs are routinely +built using separate compilation: units of the program are compiled +separately, and recompiled only when one of their dependencies changes; +independent modules may be compiled in parallel. 
The same technique may
+be applied to static analyses, for the same benefits. Such analyses are
+described as "modular".
+
+A compiler's type checker is an example of a modular static analysis.
+Many other checkers we would like to apply to Go programs can be
+understood as alternative or non-standard type systems. For example,
+vet's printf checker infers whether a function has the "printf wrapper"
+type, and it applies stricter checks to calls of such functions. In
+addition, it records which functions are printf wrappers for use by
+later analysis passes to identify other printf wrappers by induction.
+A result such as "f is a printf wrapper" that is not interesting by
+itself but serves as a stepping stone to an interesting result (such as
+a diagnostic) is called a [Fact].
+
+The analysis API allows an analysis to define new types of facts, to
+associate facts of these types with objects (named entities) declared
+within the current package, or with the package as a whole, and to query
+for an existing fact of a given type associated with an object or
+package.
+
+An Analyzer that uses facts must declare their types:
+
+	var Analyzer = &analysis.Analyzer{
+		Name:      "printf",
+		FactTypes: []analysis.Fact{new(isWrapper)},
+		...
+	}
+
+	type isWrapper struct{} // => *types.Func f "is a printf wrapper"
+
+The driver program ensures that facts for a pass's dependencies are
+generated before analyzing the package and is responsible for propagating
+facts from one package to another, possibly across address spaces.
+Consequently, Facts must be serializable. The API requires that drivers
+use the gob encoding, an efficient, robust, self-describing binary
+protocol. A fact type may implement the GobEncoder/GobDecoder interfaces
+if the default encoding is unsuitable. Facts should be stateless.
+Because serialized facts may appear within build outputs, the gob encoding
+of a fact must be deterministic, to avoid spurious cache misses in
+build systems that use content-addressable caches.
+The driver makes a single call to the gob encoder for all facts
+exported by a given analysis pass, so that the topology of
+shared data structures referenced by multiple facts is preserved.
+
+The Pass type has functions to import and export facts,
+associated either with an object or with a package:
+
+	type Pass struct {
+		...
+		ExportObjectFact func(types.Object, Fact)
+		ImportObjectFact func(types.Object, Fact) bool
+
+		ExportPackageFact func(fact Fact)
+		ImportPackageFact func(*types.Package, Fact) bool
+	}
+
+An Analyzer may only export facts associated with the current package or
+its objects, though it may import facts from any package or object that
+is an import dependency of the current package.
+
+Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by
+the pair (obj, TypeOf(fact)), and the ImportObjectFact function
+retrieves the entry from this map and copies its value into the variable
+pointed to by fact. This scheme assumes that the concrete type of fact
+is a pointer; this assumption is checked by the Validate function.
+See the "printf" analyzer for an example of object facts in action.
+
+Some driver implementations (such as those based on Bazel and Blaze) do
+not currently apply analyzers to packages of the standard library.
+Therefore, for best results, analyzer authors should not rely on
+analysis facts being available for standard packages.
+For example, although the printf checker is capable of deducing during +analysis of the log package that log.Printf is a printf wrapper, +this fact is built in to the analyzer so that it correctly checks +calls to log.Printf even when run in a driver that does not apply +it to standard packages. We would like to remove this limitation in future. + +# Testing an Analyzer + +The analysistest subpackage provides utilities for testing an Analyzer. +In a few lines of code, it is possible to run an analyzer on a package +of testdata files and check that it reported all the expected +diagnostics and facts (and no more). Expectations are expressed using +"// want ..." comments in the input code. + +# Standalone commands + +Analyzers are provided in the form of packages that a driver program is +expected to import. The vet command imports a set of several analyzers, +but users may wish to define their own analysis commands that perform +additional checks. To simplify the task of creating an analysis command, +either for a single analyzer or for a whole suite, we provide the +singlechecker and multichecker subpackages. + +The singlechecker package provides the main function for a command that +runs one analyzer. By convention, each analyzer such as +go/analysis/passes/findcall should be accompanied by a singlechecker-based +command such as go/analysis/passes/findcall/cmd/findcall, defined in its +entirety as: + + package main + + import ( + "golang.org/x/tools/go/analysis/passes/findcall" + "golang.org/x/tools/go/analysis/singlechecker" + ) + + func main() { singlechecker.Main(findcall.Analyzer) } + +A tool that provides multiple analyzers can use multichecker in a +similar way, giving it the list of Analyzers. +*/ +package analysis diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go new file mode 100644 index 00000000000..ff14ff58f9c --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -0,0 +1,447 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisflags defines helpers for processing flags of +// analysis driver tools. +package analysisflags + +import ( + "crypto/sha256" + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "go/token" + "io" + "log" + "os" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// flags common to all {single,multi,unit}checkers. +var ( + JSON = false // -json + Context = -1 // -c=N: if N>0, display offending line plus N lines of context +) + +// Parse creates a flag for each of the analyzer's flags, +// including (in multi mode) a flag named after the analyzer, +// parses the flags, then filters and returns the list of +// analyzers enabled by flags. +// +// The result is intended to be passed to unitchecker.Run or checker.Run. +// Use in unitchecker.Run will gob.Register all fact types for the returned +// graph of analyzers but of course not the ones only reachable from +// dropped analyzers. To avoid inconsistency about which gob types are +// registered from run to run, Parse itself gob.Registers all the facts +// only reachable from dropped analyzers. +// This is not a particularly elegant API, but this is an internal package. +func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer { + // Connect each analysis flag to the command line as -analysis.flag. 
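+	// For example (hypothetical names): in multi mode, the 'funcs' flag of
+	// the printf analyzer is exposed as -printf.funcs, and the analyzer
+	// itself is toggled by a -printf flag.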
+ enabled := make(map[*analysis.Analyzer]*triState) + for _, a := range analyzers { + var prefix string + + // Add -NAME flag to enable it. + if multi { + prefix = a.Name + "." + + enable := new(triState) + enableUsage := "enable " + a.Name + " analysis" + flag.Var(enable, a.Name, enableUsage) + enabled[a] = enable + } + + a.Flags.VisitAll(func(f *flag.Flag) { + if !multi && flag.Lookup(f.Name) != nil { + log.Printf("%s flag -%s would conflict with driver; skipping", a.Name, f.Name) + return + } + + name := prefix + f.Name + flag.Var(f.Value, name, f.Usage) + }) + } + + // standard flags: -flags, -V. + printflags := flag.Bool("flags", false, "print analyzer flags in JSON") + addVersionFlag() + + // flags common to all checkers + flag.BoolVar(&JSON, "json", JSON, "emit JSON output") + flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`) + + // Add shims for legacy vet flags to enable existing + // scripts that run vet to continue to work. + _ = flag.Bool("source", false, "no effect (deprecated)") + _ = flag.Bool("v", false, "no effect (deprecated)") + _ = flag.Bool("all", false, "no effect (deprecated)") + _ = flag.String("tags", "", "no effect (deprecated)") + for old, new := range vetLegacyFlags { + newFlag := flag.Lookup(new) + if newFlag != nil && flag.Lookup(old) == nil { + flag.Var(newFlag.Value, old, "deprecated alias for -"+new) + } + } + + flag.Parse() // (ExitOnError) + + // -flags: print flags so that go vet knows which ones are legitimate. + if *printflags { + printFlags() + os.Exit(0) + } + + everything := expand(analyzers) + + // If any -NAME flag is true, run only those analyzers. Otherwise, + // if any -NAME flag is false, run all but those analyzers. + if multi { + var hasTrue, hasFalse bool + for _, ts := range enabled { + switch *ts { + case setTrue: + hasTrue = true + case setFalse: + hasFalse = true + } + } + + var keep []*analysis.Analyzer + if hasTrue { + for _, a := range analyzers { + if *enabled[a] == setTrue { + keep = append(keep, a) + } + } + analyzers = keep + } else if hasFalse { + for _, a := range analyzers { + if *enabled[a] != setFalse { + keep = append(keep, a) + } + } + analyzers = keep + } + } + + // Register fact types of skipped analyzers + // in case we encounter them in imported files. + kept := expand(analyzers) + for a := range everything { + if !kept[a] { + for _, f := range a.FactTypes { + gob.Register(f) + } + } + } + + return analyzers +} + +func expand(analyzers []*analysis.Analyzer) map[*analysis.Analyzer]bool { + seen := make(map[*analysis.Analyzer]bool) + var visitAll func([]*analysis.Analyzer) + visitAll = func(analyzers []*analysis.Analyzer) { + for _, a := range analyzers { + if !seen[a] { + seen[a] = true + visitAll(a.Requires) + } + } + } + visitAll(analyzers) + return seen +} + +func printFlags() { + type jsonFlag struct { + Name string + Bool bool + Usage string + } + var flags []jsonFlag = nil + flag.VisitAll(func(f *flag.Flag) { + // Don't report {single,multi}checker debugging + // flags or fix as these have no effect on unitchecker + // (as invoked by 'go vet'). 
+		switch f.Name {
+		case "debug", "cpuprofile", "memprofile", "trace", "fix":
+			return
+		}
+
+		b, ok := f.Value.(interface{ IsBoolFlag() bool })
+		isBool := ok && b.IsBoolFlag()
+		flags = append(flags, jsonFlag{f.Name, isBool, f.Usage})
+	})
+	data, err := json.MarshalIndent(flags, "", "\t")
+	if err != nil {
+		log.Fatal(err)
+	}
+	os.Stdout.Write(data)
+}
+
+// addVersionFlag registers a -V flag that, if set,
+// prints the executable version and exits 0.
+//
+// If the -V flag already exists (for example, because it was already
+// registered by a call to cmd/internal/objabi.AddVersionFlag),
+// addVersionFlag does nothing.
+func addVersionFlag() {
+	if flag.Lookup("V") == nil {
+		flag.Var(versionFlag{}, "V", "print version and exit")
+	}
+}
+
+// versionFlag minimally complies with the -V protocol required by "go vet".
+type versionFlag struct{}
+
+func (versionFlag) IsBoolFlag() bool { return true }
+func (versionFlag) Get() interface{} { return nil }
+func (versionFlag) String() string   { return "" }
+func (versionFlag) Set(s string) error {
+	if s != "full" {
+		log.Fatalf("unsupported flag value: -V=%s (use -V=full)", s)
+	}
+
+	// This replicates the minimal subset of
+	// cmd/internal/objabi.AddVersionFlag, which is private to the
+	// go tool yet forms part of our command-line interface.
+	// TODO(adonovan): clarify the contract.
+
+	// Print the tool version so the build system can track changes.
+	// Formats:
+	//   $progname version devel ... buildID=...
+	//   $progname version go1.9.1
+	progname, err := os.Executable()
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(progname)
+	if err != nil {
+		log.Fatal(err)
+	}
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		log.Fatal(err)
+	}
+	f.Close()
+	fmt.Printf("%s version devel comments-go-here buildID=%02x\n",
+		progname, string(h.Sum(nil)))
+	os.Exit(0)
+	return nil
+}
+
+// A triState is a boolean that knows whether
+// it has been set to either true or false.
+// It is used to identify whether a flag appears;
+// the standard boolean flag cannot
+// distinguish missing from unset.
+// It also satisfies flag.Value.
+type triState int
+
+const (
+	unset triState = iota
+	setTrue
+	setFalse
+)
+
+func triStateFlag(name string, value triState, usage string) *triState {
+	flag.Var(&value, name, usage)
+	return &value
+}
+
+// triState implements flag.Value, flag.Getter, and flag.boolFlag.
+// They work like boolean flags: we can say vet -printf as well as vet -printf=true
+func (ts *triState) Get() interface{} {
+	return *ts == setTrue
+}
+
+func (ts triState) isTrue() bool {
+	return ts == setTrue
+}
+
+func (ts *triState) Set(value string) error {
+	b, err := strconv.ParseBool(value)
+	if err != nil {
+		// This error message looks poor but package "flag" adds
+		// "invalid boolean value %q for -NAME: %s"
+		return fmt.Errorf("want true or false")
+	}
+	if b {
+		*ts = setTrue
+	} else {
+		*ts = setFalse
+	}
+	return nil
+}
+
+func (ts *triState) String() string {
+	switch *ts {
+	case unset:
+		return "true"
+	case setTrue:
+		return "true"
+	case setFalse:
+		return "false"
+	}
+	panic("not reached")
+}
+
+func (ts triState) IsBoolFlag() bool {
+	return true
+}
+
+// Legacy flag support
+
+// vetLegacyFlags maps flags used by legacy vet to their corresponding
+// new names. The old names will continue to work.
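+// For example, a legacy invocation such as 'vet -printfuncs=...' (an
+// illustrative use, not a new flag) is accepted as an alias for
+// -printf.funcs via the table below.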
+var vetLegacyFlags = map[string]string{
+	// Analyzer name changes
+	"bool":       "bools",
+	"buildtags":  "buildtag",
+	"methods":    "stdmethods",
+	"rangeloops": "loopclosure",
+
+	// Analyzer flags
+	"compositewhitelist":  "composites.whitelist",
+	"printfuncs":          "printf.funcs",
+	"shadowstrict":        "shadow.strict",
+	"unusedfuncs":         "unusedresult.funcs",
+	"unusedstringmethods": "unusedresult.stringmethods",
+}
+
+// ---- output helpers common to all drivers ----
+
+// PrintPlain prints a diagnostic in plain text form,
+// with context specified by the -c flag.
+func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) {
+	posn := fset.Position(diag.Pos)
+	fmt.Fprintf(os.Stderr, "%s: %s\n", posn, diag.Message)
+
+	// -c=N: show offending line plus N lines of context.
+	if Context >= 0 {
+		posn := fset.Position(diag.Pos)
+		end := fset.Position(diag.End)
+		if !end.IsValid() {
+			end = posn
+		}
+		data, _ := os.ReadFile(posn.Filename)
+		lines := strings.Split(string(data), "\n")
+		for i := posn.Line - Context; i <= end.Line+Context; i++ {
+			if 1 <= i && i <= len(lines) {
+				fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1])
+			}
+		}
+	}
+}
+
+// A JSONTree is a mapping from package ID to analysis name to result.
+// Each result is either a jsonError or a list of JSONDiagnostic.
+type JSONTree map[string]map[string]interface{}
+
+// A JSONTextEdit describes the replacement of a portion of a file.
+// Start and End are zero-based half-open indices into the original byte
+// sequence of the file, and New is the new text.
+type JSONTextEdit struct {
+	Filename string `json:"filename"`
+	Start    int    `json:"start"`
+	End      int    `json:"end"`
+	New      string `json:"new"`
+}
+
+// A JSONSuggestedFix describes an edit that should be applied as a whole or not
+// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix
+// consists of multiple non-contiguous edits.
+type JSONSuggestedFix struct {
+	Message string         `json:"message"`
+	Edits   []JSONTextEdit `json:"edits"`
+}
+
+// A JSONDiagnostic describes the JSON schema of an analysis.Diagnostic.
+//
+// TODO(matloob): include End position if present.
+type JSONDiagnostic struct {
+	Category       string                   `json:"category,omitempty"`
+	Posn           string                   `json:"posn"` // e.g. "file.go:line:column"
+	Message        string                   `json:"message"`
+	SuggestedFixes []JSONSuggestedFix       `json:"suggested_fixes,omitempty"`
+	Related        []JSONRelatedInformation `json:"related,omitempty"`
+}
+
+// A JSONRelatedInformation describes a secondary position and message related
+// to a primary diagnostic.
+//
+// TODO(adonovan): include End position if present.
+type JSONRelatedInformation struct {
+	Posn    string `json:"posn"` // e.g. "file.go:line:column"
+	Message string `json:"message"`
+}
+
+// Add adds the result of analysis 'name' on package 'id'.
+// The result is either a list of diagnostics or an error.
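+// A tree with one diagnostic marshals to JSON of roughly this shape
+// (illustrative values only):
+//
+//	{
+//		"example.com/p": {
+//			"printf": [
+//				{
+//					"posn": "p/p.go:3:2",
+//					"message": "wrong number of args"
+//				}
+//			]
+//		}
+//	}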
+func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) { + var v interface{} + if err != nil { + type jsonError struct { + Err string `json:"error"` + } + v = jsonError{err.Error()} + } else if len(diags) > 0 { + diagnostics := make([]JSONDiagnostic, 0, len(diags)) + for _, f := range diags { + var fixes []JSONSuggestedFix + for _, fix := range f.SuggestedFixes { + var edits []JSONTextEdit + for _, edit := range fix.TextEdits { + edits = append(edits, JSONTextEdit{ + Filename: fset.Position(edit.Pos).Filename, + Start: fset.Position(edit.Pos).Offset, + End: fset.Position(edit.End).Offset, + New: string(edit.NewText), + }) + } + fixes = append(fixes, JSONSuggestedFix{ + Message: fix.Message, + Edits: edits, + }) + } + var related []JSONRelatedInformation + for _, r := range f.Related { + related = append(related, JSONRelatedInformation{ + Posn: fset.Position(r.Pos).String(), + Message: r.Message, + }) + } + jdiag := JSONDiagnostic{ + Category: f.Category, + Posn: fset.Position(f.Pos).String(), + Message: f.Message, + SuggestedFixes: fixes, + Related: related, + } + diagnostics = append(diagnostics, jdiag) + } + v = diagnostics + } + if v != nil { + m, ok := tree[id] + if !ok { + m = make(map[string]interface{}) + tree[id] = m + } + m[name] = v + } +} + +func (tree JSONTree) Print() { + data, err := json.MarshalIndent(tree, "", "\t") + if err != nil { + log.Panicf("internal error: JSON marshaling failed: %v", err) + } + fmt.Printf("%s\n", data) +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go new file mode 100644 index 00000000000..ce92892c817 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisflags + +import ( + "flag" + "fmt" + "log" + "os" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" +) + +const help = `PROGNAME is a tool for static analysis of Go programs. + +PROGNAME examines Go source code and reports suspicious constructs, +such as Printf calls whose arguments do not align with the format +string. It uses heuristics that do not guarantee all reports are +genuine problems, but it can find errors not caught by the compilers. +` + +// Help implements the help subcommand for a multichecker or unitchecker +// style command. The optional args specify the analyzers to describe. +// Help calls log.Fatal if no such analyzer exists. +func Help(progname string, analyzers []*analysis.Analyzer, args []string) { + // No args: show summary of all analyzers. + if len(args) == 0 { + fmt.Println(strings.Replace(help, "PROGNAME", progname, -1)) + fmt.Println("Registered analyzers:") + fmt.Println() + sort.Slice(analyzers, func(i, j int) bool { + return analyzers[i].Name < analyzers[j].Name + }) + for _, a := range analyzers { + title := strings.Split(a.Doc, "\n\n")[0] + fmt.Printf(" %-12s %s\n", a.Name, title) + } + fmt.Println("\nBy default all analyzers are run.") + fmt.Println("To select specific analyzers, use the -NAME flag for each one,") + fmt.Println(" or -NAME=false to run all analyzers not explicitly disabled.") + + // Show only the core command-line flags. 
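+		// ("Core" here means flags whose name contains no '.', i.e. flags
+		// that do not belong to a particular analyzer; see the filter below.)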
+ fmt.Println("\nCore flags:") + fmt.Println() + fs := flag.NewFlagSet("", flag.ExitOnError) + flag.VisitAll(func(f *flag.Flag) { + if !strings.Contains(f.Name, ".") { + fs.Var(f.Value, f.Name, f.Usage) + } + }) + fs.SetOutput(os.Stdout) + fs.PrintDefaults() + + fmt.Printf("\nTo see details and flags of a specific analyzer, run '%s help name'.\n", progname) + + return + } + + // Show help on specific analyzer(s). +outer: + for _, arg := range args { + for _, a := range analyzers { + if a.Name == arg { + paras := strings.Split(a.Doc, "\n\n") + title := paras[0] + fmt.Printf("%s: %s\n", a.Name, title) + + // Show only the flags relating to this analysis, + // properly prefixed. + first := true + fs := flag.NewFlagSet(a.Name, flag.ExitOnError) + a.Flags.VisitAll(func(f *flag.Flag) { + if first { + first = false + fmt.Println("\nAnalyzer flags:") + fmt.Println() + } + fs.Var(f.Value, a.Name+"."+f.Name, f.Usage) + }) + fs.SetOutput(os.Stdout) + fs.PrintDefaults() + + if len(paras) > 1 { + fmt.Printf("\n%s\n", strings.Join(paras[1:], "\n\n")) + } + + continue outer + } + } + log.Fatalf("Analyzer %q not registered", arg) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go new file mode 100644 index 00000000000..26a917a9919 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisflags + +import ( + "fmt" + "net/url" + + "golang.org/x/tools/go/analysis" +) + +// ResolveURL resolves the URL field for a Diagnostic from an Analyzer +// and returns the URL. See Diagnostic.URL for details. +func ResolveURL(a *analysis.Analyzer, d analysis.Diagnostic) (string, error) { + if d.URL == "" && d.Category == "" && a.URL == "" { + return "", nil // do nothing + } + raw := d.URL + if d.URL == "" && d.Category != "" { + raw = "#" + d.Category + } + u, err := url.Parse(raw) + if err != nil { + return "", fmt.Errorf("invalid Diagnostic.URL %q: %s", raw, err) + } + base, err := url.Parse(a.URL) + if err != nil { + return "", fmt.Errorf("invalid Analyzer.URL %q: %s", a.URL, err) + } + return base.ResolveReference(u).String(), nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go new file mode 100644 index 00000000000..8a802831c39 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go @@ -0,0 +1,984 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package checker defines the implementation of the checker commands. +// The same code drives the multi-analysis driver, the single-analysis +// driver that is conventionally provided for convenience along with +// each analysis package, and the test driver. 
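+// (The command drivers enter through Run, below; the test driver used by
+// analysistest enters through TestAnalyzer.)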
+package checker + +import ( + "bytes" + "encoding/gob" + "flag" + "fmt" + "go/format" + "go/token" + "go/types" + "log" + "os" + "reflect" + "runtime" + "runtime/pprof" + "runtime/trace" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/robustio" +) + +var ( + // Debug is a set of single-letter flags: + // + // f show [f]acts as they are created + // p disable [p]arallel execution of analyzers + // s do additional [s]anity checks on fact types and serialization + // t show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise) + // v show [v]erbose logging + // + Debug = "" + + // Log files for optional performance tracing. + CPUProfile, MemProfile, Trace string + + // IncludeTests indicates whether test files should be analyzed too. + IncludeTests = true + + // Fix determines whether to apply all suggested fixes. + Fix bool +) + +// RegisterFlags registers command-line flags used by the analysis driver. +func RegisterFlags() { + // When adding flags here, remember to update + // the list of suppressed flags in analysisflags. + + flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`) + + flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file") + flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file") + flag.StringVar(&Trace, "trace", "", "write trace log to this file") + flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too") + + flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes") +} + +// Run loads the packages specified by args using go/packages, +// then applies the specified analyzers to them. +// Analysis flags must already have been set. +// Analyzers must be valid according to [analysis.Validate]. +// It provides most of the logic for the main functions of both the +// singlechecker and the multi-analysis commands. +// It returns the appropriate exit code. +func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) { + if CPUProfile != "" { + f, err := os.Create(CPUProfile) + if err != nil { + log.Fatal(err) + } + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + // NB: profile won't be written in case of error. + defer pprof.StopCPUProfile() + } + + if Trace != "" { + f, err := os.Create(Trace) + if err != nil { + log.Fatal(err) + } + if err := trace.Start(f); err != nil { + log.Fatal(err) + } + // NB: trace log won't be written in case of error. + defer func() { + trace.Stop() + log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace) + }() + } + + if MemProfile != "" { + f, err := os.Create(MemProfile) + if err != nil { + log.Fatal(err) + } + // NB: memprofile won't be written in case of error. + defer func() { + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatalf("Writing memory profile: %v", err) + } + f.Close() + }() + } + + // Load the packages. + if dbg('v') { + log.SetPrefix("") + log.SetFlags(log.Lmicroseconds) // display timing + log.Printf("load %s", args) + } + + // Optimization: if the selected analyzers don't produce/consume + // facts, we need source only for the initial packages. 
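+	// (Fact-using analyzers must also analyze each dependency, which
+	// requires its typed syntax; see needFacts below.)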
+ allSyntax := needFacts(analyzers) + initial, err := load(args, allSyntax) + if err != nil { + log.Print(err) + return 1 + } + + pkgsExitCode := 0 + // Print package and module errors regardless of RunDespiteErrors. + // Do not exit if there are errors, yet. + if n := packages.PrintErrors(initial); n > 0 { + pkgsExitCode = 1 + } + + // Run the analyzers. On each package with (transitive) + // errors, we run only the subset of analyzers that are + // marked (and whose transitive requirements are also + // marked) with RunDespiteErrors. + roots := analyze(initial, analyzers) + + // Apply fixes. + if Fix { + if err := applyFixes(roots); err != nil { + // Fail when applying fixes failed. + log.Print(err) + return 1 + } + } + + // Print the results. If !RunDespiteErrors and there + // are errors in the packages, this will have 0 exit + // code. Otherwise, we prefer to return exit code + // indicating diagnostics. + if diagExitCode := printDiagnostics(roots); diagExitCode != 0 { + return diagExitCode // there were diagnostics + } + return pkgsExitCode // package errors but no diagnostics +} + +// load loads the initial packages. Returns only top-level loading +// errors. Does not consider errors in packages. +func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { + mode := packages.LoadSyntax + if allSyntax { + mode = packages.LoadAllSyntax + } + mode |= packages.NeedModule + conf := packages.Config{ + Mode: mode, + Tests: IncludeTests, + } + initial, err := packages.Load(&conf, patterns...) + if err == nil && len(initial) == 0 { + err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " ")) + } + return initial, err +} + +// TestAnalyzer applies an analyzer to a set of packages (and their +// dependencies if necessary) and returns the results. +// The analyzer must be valid according to [analysis.Validate]. +// +// Facts about pkg are returned in a map keyed by object; package facts +// have a nil key. +// +// This entry point is used only by analysistest. +func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult { + var results []*TestAnalyzerResult + for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) { + facts := make(map[types.Object][]analysis.Fact) + for key, fact := range act.objectFacts { + if key.obj.Pkg() == act.pass.Pkg { + facts[key.obj] = append(facts[key.obj], fact) + } + } + for key, fact := range act.packageFacts { + if key.pkg == act.pass.Pkg { + facts[nil] = append(facts[nil], fact) + } + } + + results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err}) + } + return results +} + +type TestAnalyzerResult struct { + Pass *analysis.Pass + Diagnostics []analysis.Diagnostic + Facts map[types.Object][]analysis.Fact + Result interface{} + Err error +} + +func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action { + // Construct the action graph. + if dbg('v') { + log.Printf("building graph of analysis passes") + } + + // Each graph node (action) is one unit of analysis. + // Edges express package-to-package (vertical) dependencies, + // and analysis-to-analysis (horizontal) dependencies. 
+	type key struct {
+		*analysis.Analyzer
+		*packages.Package
+	}
+	actions := make(map[key]*action)
+
+	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
+	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
+		k := key{a, pkg}
+		act, ok := actions[k]
+		if !ok {
+			act = &action{a: a, pkg: pkg}
+
+			// Add a dependency on each required analyzer.
+			for _, req := range a.Requires {
+				act.deps = append(act.deps, mkAction(req, pkg))
+			}
+
+			// An analysis that consumes/produces facts
+			// must run on the package's dependencies too.
+			if len(a.FactTypes) > 0 {
+				paths := make([]string, 0, len(pkg.Imports))
+				for path := range pkg.Imports {
+					paths = append(paths, path)
+				}
+				sort.Strings(paths) // for determinism
+				for _, path := range paths {
+					dep := mkAction(a, pkg.Imports[path])
+					act.deps = append(act.deps, dep)
+				}
+			}
+
+			actions[k] = act
+		}
+		return act
+	}
+
+	// Build nodes for initial packages.
+	var roots []*action
+	for _, a := range analyzers {
+		for _, pkg := range pkgs {
+			root := mkAction(a, pkg)
+			root.isroot = true
+			roots = append(roots, root)
+		}
+	}
+
+	// Execute the graph in parallel.
+	execAll(roots)
+
+	return roots
+}
+
+func applyFixes(roots []*action) error {
+	// Visit all of the actions and accumulate the suggested edits.
+	paths := make(map[robustio.FileID]string)
+	editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit)
+	visited := make(map[*action]bool)
+	var apply func(*action) error
+	var visitAll func(actions []*action) error
+	visitAll = func(actions []*action) error {
+		for _, act := range actions {
+			if !visited[act] {
+				visited[act] = true
+				if err := visitAll(act.deps); err != nil {
+					return err
+				}
+				if err := apply(act); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	apply = func(act *action) error {
+		editsForTokenFile := make(map[*token.File][]diff.Edit)
+		for _, diag := range act.diagnostics {
+			for _, sf := range diag.SuggestedFixes {
+				for _, edit := range sf.TextEdits {
+					// Validate the edit.
+					// Any error here indicates a bug in the analyzer.
+					start, end := edit.Pos, edit.End
+					file := act.pkg.Fset.File(start)
+					if file == nil {
+						return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)",
+							act.a.Name, start)
+					}
+					if !end.IsValid() {
+						end = start
+					}
+					if start > end {
+						return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)",
+							act.a.Name, start, end)
+					}
+					if eof := token.Pos(file.Base() + file.Size()); end > eof {
+						return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)",
+							act.a.Name, end, eof)
+					}
+					edit := diff.Edit{
+						Start: file.Offset(start),
+						End:   file.Offset(end),
+						New:   string(edit.NewText),
+					}
+					editsForTokenFile[file] = append(editsForTokenFile[file], edit)
+				}
+			}
+		}
+
+		for f, edits := range editsForTokenFile {
+			id, _, err := robustio.GetFileID(f.Name())
+			if err != nil {
+				return err
+			}
+			if _, hasId := paths[id]; !hasId {
+				paths[id] = f.Name()
+				editsByAction[id] = make(map[*action][]diff.Edit)
+			}
+			editsByAction[id][act] = edits
+		}
+		return nil
+	}
+
+	if err := visitAll(roots); err != nil {
+		return err
+	}
+
+	// Validate and group the edits to each actual file.
+	editsByPath := make(map[string][]diff.Edit)
+	for id, actToEdits := range editsByAction {
+		path := paths[id]
+		actions := make([]*action, 0, len(actToEdits))
+		for act := range actToEdits {
+			actions = append(actions, act)
+		}
+
+		// Does any action create conflicting edits?
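+		// (validateEdits, below, sorts the edits and reports the index of an
+		// overlapping non-equivalent adjacent pair, or a negative index if
+		// there is none.)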
+ for _, act := range actions { + edits := actToEdits[act] + if _, invalid := validateEdits(edits); invalid > 0 { + name, x, y := act.a.Name, edits[invalid-1], edits[invalid] + return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) + } + } + + // Does any pair of different actions create edits that conflict? + for j := range actions { + for k := range actions[:j] { + x, y := actions[j], actions[k] + if x.a.Name > y.a.Name { + x, y = y, x + } + xedits, yedits := actToEdits[x], actToEdits[y] + combined := append(xedits, yedits...) + if _, invalid := validateEdits(combined); invalid > 0 { + // TODO: consider applying each action's consistent list of edits entirely, + // and then using a three-way merge (such as GNU diff3) on the resulting + // files to report more precisely the parts that actually conflict. + return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits) + } + } + } + + var edits []diff.Edit + for act := range actToEdits { + edits = append(edits, actToEdits[act]...) + } + editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated. + } + + // Now we've got a set of valid edits for each file. Apply them. + for path, edits := range editsByPath { + // TODO(adonovan): this should really work on the same + // gulp from the file system that fed the analyzer (see #62292). + contents, err := os.ReadFile(path) + if err != nil { + return err + } + + out, err := diff.ApplyBytes(contents, edits) + if err != nil { + return err + } + + // Try to format the file. + if formatted, err := format.Source(out); err == nil { + out = formatted + } + + if err := os.WriteFile(path, out, 0644); err != nil { + return err + } + } + return nil +} + +// validateEdits returns a list of edits that is sorted and +// contains no duplicate edits. Returns the index of some +// overlapping adjacent edits if there is one and <0 if the +// edits are valid. +func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { + if len(edits) == 0 { + return nil, -1 + } + equivalent := func(x, y diff.Edit) bool { + return x.Start == y.Start && x.End == y.End && x.New == y.New + } + diff.SortEdits(edits) + unique := []diff.Edit{edits[0]} + invalid := -1 + for i := 1; i < len(edits); i++ { + prev, cur := edits[i-1], edits[i] + // We skip over equivalent edits without considering them + // an error. This handles identical edits coming from the + // multiple ways of loading a package into a + // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". + if !equivalent(prev, cur) { + unique = append(unique, cur) + if prev.End > cur.Start { + invalid = i + } + } + } + return unique, invalid +} + +// diff3Conflict returns an error describing two conflicting sets of +// edits on a file at path. +func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error { + contents, err := os.ReadFile(path) + if err != nil { + return err + } + oldlabel, old := "base", string(contents) + + xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines) + if err != nil { + return err + } + ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines) + if err != nil { + return err + } + + return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", + xlabel, ylabel, path, xdiff, ydiff) +} + +// printDiagnostics prints the diagnostics for the root packages in either +// plain text or JSON format. JSON format also includes errors for any +// dependencies. 
+// +// It returns the exitcode: in plain mode, 0 for success, 1 for analysis +// errors, and 3 for diagnostics. We avoid 2 since the flag package uses +// it. JSON mode always succeeds at printing errors and diagnostics in a +// structured form to stdout. +func printDiagnostics(roots []*action) (exitcode int) { + // Print the output. + // + // Print diagnostics only for root packages, + // but errors for all packages. + printed := make(map[*action]bool) + var print func(*action) + var visitAll func(actions []*action) + visitAll = func(actions []*action) { + for _, act := range actions { + if !printed[act] { + printed[act] = true + visitAll(act.deps) + print(act) + } + } + } + + if analysisflags.JSON { + // JSON output + tree := make(analysisflags.JSONTree) + print = func(act *action) { + var diags []analysis.Diagnostic + if act.isroot { + diags = act.diagnostics + } + tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err) + } + visitAll(roots) + tree.Print() + } else { + // plain text output + + // De-duplicate diagnostics by position (not token.Pos) to + // avoid double-reporting in source files that belong to + // multiple packages, such as foo and foo.test. + type key struct { + pos token.Position + end token.Position + *analysis.Analyzer + message string + } + seen := make(map[key]bool) + + print = func(act *action) { + if act.err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err) + exitcode = 1 // analysis failed, at least partially + return + } + if act.isroot { + for _, diag := range act.diagnostics { + // We don't display a.Name/f.Category + // as most users don't care. + + posn := act.pkg.Fset.Position(diag.Pos) + end := act.pkg.Fset.Position(diag.End) + k := key{posn, end, act.a, diag.Message} + if seen[k] { + continue // duplicate + } + seen[k] = true + + analysisflags.PrintPlain(act.pkg.Fset, diag) + } + } + } + visitAll(roots) + + if exitcode == 0 && len(seen) > 0 { + exitcode = 3 // successfully produced diagnostics + } + } + + // Print timing info. + if dbg('t') { + if !dbg('p') { + log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism") + } + var all []*action + var total time.Duration + for act := range printed { + all = append(all, act) + total += act.duration + } + sort.Slice(all, func(i, j int) bool { + return all[i].duration > all[j].duration + }) + + // Print actions accounting for 90% of the total. + var sum time.Duration + for _, act := range all { + fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act) + sum += act.duration + if sum >= total*9/10 { + break + } + } + } + + return exitcode +} + +// needFacts reports whether any analysis required by the specified set +// needs facts. If so, we must load the entire program from source. +func needFacts(analyzers []*analysis.Analyzer) bool { + seen := make(map[*analysis.Analyzer]bool) + var q []*analysis.Analyzer // for BFS + q = append(q, analyzers...) + for len(q) > 0 { + a := q[0] + q = q[1:] + if !seen[a] { + seen[a] = true + if len(a.FactTypes) > 0 { + return true + } + q = append(q, a.Requires...) + } + } + return false +} + +// An action represents one unit of analysis work: the application of +// one analysis to one package. Actions form a DAG, both within a +// package (as different analyzers are applied, either in sequence or +// parallel), and across packages (as dependencies are analyzed). 
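The `action` type declared next keys one unit of work to an (analyzer, package) pair, and `execAll`/`exec` further below run the resulting DAG with `sync.Once` so every action executes exactly once, after its dependencies. A minimal standalone sketch of that pattern, with hypothetical node names (not part of the vendored code):

```go
package main

import (
	"fmt"
	"sync"
)

// node mirrors the action pattern: run dependencies first,
// then the node body, exactly once even under parallel callers.
type node struct {
	once sync.Once
	name string
	deps []*node
}

func (n *node) exec() {
	n.once.Do(func() {
		execAll(n.deps) // all dependencies complete before the body runs
		fmt.Println("ran", n.name)
	})
}

// execAll starts the given nodes in parallel and waits for them all.
func execAll(nodes []*node) {
	var wg sync.WaitGroup
	for _, n := range nodes {
		wg.Add(1)
		go func(n *node) {
			defer wg.Done()
			n.exec()
		}(n)
	}
	wg.Wait()
}

func main() {
	a := &node{name: "a"}
	b := &node{name: "b", deps: []*node{a}}
	c := &node{name: "c", deps: []*node{a}}
	d := &node{name: "d", deps: []*node{b, c}}
	execAll([]*node{d}) // prints "ran a" first and "ran d" last
}
```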
+type action struct { + once sync.Once + a *analysis.Analyzer + pkg *packages.Package + pass *analysis.Pass + isroot bool + deps []*action + objectFacts map[objectFactKey]analysis.Fact + packageFacts map[packageFactKey]analysis.Fact + result interface{} + diagnostics []analysis.Diagnostic + err error + duration time.Duration +} + +type objectFactKey struct { + obj types.Object + typ reflect.Type +} + +type packageFactKey struct { + pkg *types.Package + typ reflect.Type +} + +func (act *action) String() string { + return fmt.Sprintf("%s@%s", act.a, act.pkg) +} + +func execAll(actions []*action) { + sequential := dbg('p') + var wg sync.WaitGroup + for _, act := range actions { + wg.Add(1) + work := func(act *action) { + act.exec() + wg.Done() + } + if sequential { + work(act) + } else { + go work(act) + } + } + wg.Wait() +} + +func (act *action) exec() { act.once.Do(act.execOnce) } + +func (act *action) execOnce() { + // Analyze dependencies. + execAll(act.deps) + + // TODO(adonovan): uncomment this during profiling. + // It won't build pre-go1.11 but conditional compilation + // using build tags isn't warranted. + // + // ctx, task := trace.NewTask(context.Background(), "exec") + // trace.Log(ctx, "pass", act.String()) + // defer task.End() + + // Record time spent in this node but not its dependencies. + // In parallel mode, due to GC/scheduler contention, the + // time is 5x higher than in sequential mode, even with a + // semaphore limiting the number of threads here. + // So use -debug=tp. + if dbg('t') { + t0 := time.Now() + defer func() { act.duration = time.Since(t0) }() + } + + // Report an error if any dependency failed. + var failed []string + for _, dep := range act.deps { + if dep.err != nil { + failed = append(failed, dep.String()) + } + } + if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + // Plumb the output values of the dependencies + // into the inputs of this action. Also facts. + inputs := make(map[*analysis.Analyzer]interface{}) + act.objectFacts = make(map[objectFactKey]analysis.Fact) + act.packageFacts = make(map[packageFactKey]analysis.Fact) + for _, dep := range act.deps { + if dep.pkg == act.pkg { + // Same package, different analysis (horizontal edge): + // in-memory outputs of prerequisite analyzers + // become inputs to this analysis pass. + inputs[dep.a] = dep.result + + } else if dep.a == act.a { // (always true) + // Same analysis, different package (vertical edge): + // serialized facts produced by prerequisite analysis + // become available to this analysis pass. + inheritFacts(act, dep) + } + } + + module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers. + if mod := act.pkg.Module; mod != nil { + module.Path = mod.Path + module.Version = mod.Version + module.GoVersion = mod.GoVersion + } + + // Run the analysis. 
+ pass := &analysis.Pass{ + Analyzer: act.a, + Fset: act.pkg.Fset, + Files: act.pkg.Syntax, + OtherFiles: act.pkg.OtherFiles, + IgnoredFiles: act.pkg.IgnoredFiles, + Pkg: act.pkg.Types, + TypesInfo: act.pkg.TypesInfo, + TypesSizes: act.pkg.TypesSizes, + TypeErrors: act.pkg.TypeErrors, + Module: module, + + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: act.importObjectFact, + ExportObjectFact: act.exportObjectFact, + ImportPackageFact: act.importPackageFact, + ExportPackageFact: act.exportPackageFact, + AllObjectFacts: act.allObjectFacts, + AllPackageFacts: act.allPackageFacts, + } + pass.ReadFile = analysisinternal.MakeReadFile(pass) + act.pass = pass + + var err error + if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors { + err = fmt.Errorf("analysis skipped due to errors in package") + } else { + act.result, err = pass.Analyzer.Run(pass) + if err == nil { + if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want { + err = fmt.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + } + } + if err == nil { // resolve diagnostic URLs + for i := range act.diagnostics { + if url, uerr := analysisflags.ResolveURL(act.a, act.diagnostics[i]); uerr == nil { + act.diagnostics[i].URL = url + } else { + err = uerr // keep the last error + } + } + } + act.err = err + + // disallow calls after Run + pass.ExportObjectFact = nil + pass.ExportPackageFact = nil +} + +// inheritFacts populates act.facts with +// those it obtains from its dependency, dep. +func inheritFacts(act, dep *action) { + serialize := dbg('s') + + for key, fact := range dep.objectFacts { + // Filter out facts related to objects + // that are irrelevant downstream + // (equivalently: not in the compiler export data). + if !exportedFrom(key.obj, dep.pkg.Types) { + if false { + log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) + } + continue + } + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces. + if serialize { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact) + } + act.objectFacts[key] = fact + } + + for key, fact := range dep.packageFacts { + // TODO: filter out facts that belong to + // packages not mentioned in the export data + // to prevent side channels. + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces + // and is deterministic. + if serialize { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact) + } + act.packageFacts[key] = fact + } +} + +// codeFact encodes then decodes a fact, +// just to exercise that logic. +func codeFact(fact analysis.Fact) (analysis.Fact, error) { + // We encode facts one at a time. + // A real modular driver would emit all facts + // into one encoder to improve gob efficiency. + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(fact); err != nil { + return nil, err + } + + // Encode it twice and assert that we get the same bits. 
+	// This helps detect nondeterministic Gob encoding (e.g. of maps).
+	var buf2 bytes.Buffer
+	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
+		return nil, err
+	}
+	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
+		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
+	}
+
+	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
+	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
+		return nil, err
+	}
+	return new, nil
+}
+
+// exportedFrom reports whether obj may be visible to a package that imports pkg.
+// This includes not just the exported members of pkg, but also unexported
+// constants, types, fields, and methods, perhaps belonging to other packages,
+// that find their way into the API.
+// This is an overapproximation of the more accurate approach used by
+// gc export data, which walks the type graph, but it's much simpler.
+//
+// TODO(adonovan): do more accurate filtering by walking the type graph.
+func exportedFrom(obj types.Object, pkg *types.Package) bool {
+	switch obj := obj.(type) {
+	case *types.Func:
+		return obj.Exported() && obj.Pkg() == pkg ||
+			obj.Type().(*types.Signature).Recv() != nil
+	case *types.Var:
+		if obj.IsField() {
+			return true
+		}
+		// we can't filter more aggressively than this because we need
+		// to consider function parameters exported, but have no way
+		// of telling apart function parameters from local variables.
+		return obj.Pkg() == pkg
+	case *types.TypeName, *types.Const:
+		return true
+	}
+	return false // Nil, Builtin, Label, or PkgName
+}
+
+// importObjectFact implements Pass.ImportObjectFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// importObjectFact copies the fact value to *ptr.
+func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
+	if obj == nil {
+		panic("nil object")
+	}
+	key := objectFactKey{obj, factType(ptr)}
+	if v, ok := act.objectFacts[key]; ok {
+		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+		return true
+	}
+	return false
+}
+
+// exportObjectFact implements Pass.ExportObjectFact.
+func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
+	if act.pass.ExportObjectFact == nil {
+		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
+	}
+
+	if obj.Pkg() != act.pkg.Types {
+		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
+			act.a, act.pkg, obj, fact)
+	}
+
+	key := objectFactKey{obj, factType(fact)}
+	act.objectFacts[key] = fact // clobber any existing entry
+	if dbg('f') {
+		objstr := types.ObjectString(obj, (*types.Package).Name)
+		fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
+			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
+	}
+}
+
+// allObjectFacts implements Pass.AllObjectFacts.
+func (act *action) allObjectFacts() []analysis.ObjectFact {
+	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
+	for k := range act.objectFacts {
+		facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
+	}
+	return facts
+}
+
+// importPackageFact implements Pass.ImportPackageFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// importPackageFact copies the fact value to *ptr.
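These import/export hooks are the driver half of the fact API. For orientation, a hedged sketch of the analyzer half; the `noisy` analyzer and its `isNoisy` fact are hypothetical (note the exported field: gob rejects structs with none):

```go
package noisy

import (
	"go/types"

	"golang.org/x/tools/go/analysis"
)

// isNoisy is a fact attached to function objects. Fact types must be
// pointers and gob-encodable, and be listed in FactTypes.
type isNoisy struct{ Level int }

func (*isNoisy) AFact() {}

var Analyzer = &analysis.Analyzer{
	Name:      "noisy",
	Doc:       "demo: exports a fact that dependent packages can import",
	FactTypes: []analysis.Fact{new(isNoisy)},
	Run: func(pass *analysis.Pass) (interface{}, error) {
		// Export a fact on each package-level function (demo only).
		// The driver serializes these for dependent packages.
		scope := pass.Pkg.Scope()
		for _, name := range scope.Names() {
			if fn, ok := scope.Lookup(name).(*types.Func); ok {
				pass.ExportObjectFact(fn, &isNoisy{Level: 1})
			}
		}
		// A dependent package recovers a fact like this:
		//   var f isNoisy
		//   if pass.ImportObjectFact(obj, &f) { ... }
		return nil, nil
	},
}
```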
+func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := packageFactKey{pkg, factType(ptr)} + if v, ok := act.packageFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportPackageFact implements Pass.ExportPackageFact. +func (act *action) exportPackageFact(fact analysis.Fact) { + if act.pass.ExportPackageFact == nil { + log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) + } + + key := packageFactKey{act.pass.Pkg, factType(fact)} + act.packageFacts[key] = fact // clobber any existing entry + if dbg('f') { + fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n", + act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) + } +} + +func factType(fact analysis.Fact) reflect.Type { + t := reflect.TypeOf(fact) + if t.Kind() != reflect.Ptr { + log.Fatalf("invalid Fact type: got %T, want pointer", fact) + } + return t +} + +// allPackageFacts implements Pass.AllPackageFacts. +func (act *action) allPackageFacts() []analysis.PackageFact { + facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) + for k := range act.packageFacts { + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) + } + return facts +} + +func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go new file mode 100644 index 00000000000..3b121cb0ce7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspect defines an Analyzer that provides an AST inspector +// (golang.org/x/tools/go/ast/inspector.Inspector) for the syntax trees +// of a package. It is only a building block for other analyzers. +// +// Example of use in another analysis: +// +// import ( +// "golang.org/x/tools/go/analysis" +// "golang.org/x/tools/go/analysis/passes/inspect" +// "golang.org/x/tools/go/ast/inspector" +// ) +// +// var Analyzer = &analysis.Analyzer{ +// ... +// Requires: []*analysis.Analyzer{inspect.Analyzer}, +// } +// +// func run(pass *analysis.Pass) (interface{}, error) { +// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) +// inspect.Preorder(nil, func(n ast.Node) { +// ... +// }) +// return nil, nil +// } +package inspect + +import ( + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "inspect", + Doc: "optimize AST traversal for later passes", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", + Run: run, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(new(inspector.Inspector)), +} + +func run(pass *analysis.Pass) (interface{}, error) { + return inspector.New(pass.Files), nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/singlechecker/singlechecker.go b/vendor/golang.org/x/tools/go/analysis/singlechecker/singlechecker.go new file mode 100644 index 00000000000..91044ca0858 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/singlechecker/singlechecker.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singlechecker defines the main function for an analysis +// driver with only a single analysis. +// This package makes it easy for a provider of an analysis package to +// also provide a standalone tool that runs just that analysis. +// +// For example, if example.org/findbadness is an analysis package, +// all that is needed to define a standalone tool is a file, +// example.org/findbadness/cmd/findbadness/main.go, containing: +// +// // The findbadness command runs an analysis. +// package main +// +// import ( +// "example.org/findbadness" +// "golang.org/x/tools/go/analysis/singlechecker" +// ) +// +// func main() { singlechecker.Main(findbadness.Analyzer) } +package singlechecker + +import ( + "flag" + "fmt" + "log" + "os" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/analysis/unitchecker" +) + +// Main is the main function for a checker command for a single analysis. +func Main(a *analysis.Analyzer) { + log.SetFlags(0) + log.SetPrefix(a.Name + ": ") + + analyzers := []*analysis.Analyzer{a} + + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + checker.RegisterFlags() + + flag.Usage = func() { + paras := strings.Split(a.Doc, "\n\n") + fmt.Fprintf(os.Stderr, "%s: %s\n\n", a.Name, paras[0]) + fmt.Fprintf(os.Stderr, "Usage: %s [-flag] [package]\n\n", a.Name) + if len(paras) > 1 { + fmt.Fprintln(os.Stderr, strings.Join(paras[1:], "\n\n")) + } + fmt.Fprintln(os.Stderr, "\nFlags:") + flag.PrintDefaults() + } + + analyzers = analysisflags.Parse(analyzers, false) + + args := flag.Args() + if len(args) == 0 { + flag.Usage() + os.Exit(1) + } + + if len(args) == 1 && strings.HasSuffix(args[0], ".cfg") { + unitchecker.Run(args[0], analyzers) + panic("unreachable") + } + + os.Exit(checker.Run(args, analyzers)) +} diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go new file mode 100644 index 00000000000..71ebbfaef15 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -0,0 +1,452 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The unitchecker package defines the main function for an analysis +// driver that analyzes a single compilation unit during a build. +// It is invoked by a build system such as "go vet": +// +// $ go vet -vettool=$(which vet) +// +// It supports the following command-line protocol: +// +// -V=full describe executable (to the build tool) +// -flags describe flags (to the build tool) +// foo.cfg description of compilation unit (from the build tool) +// +// This package does not depend on go/packages. +// If you need a standalone tool, use multichecker, +// which supports this mode but can also load packages +// from source using go/packages. +package unitchecker + +// TODO(adonovan): +// - with gccgo, go build does not build standard library, +// so we will not get to analyze it. Yet we must in order +// to create base facts for, say, the fmt package for the +// printf checker. 
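As the package comment above describes, a build system drives this package via `go vet -vettool=...`. A minimal sketch of such a driver binary; the analyzer selection is illustrative:

```go
// cmd/myvet/main.go (hypothetical driver)
package main

import (
	"golang.org/x/tools/go/analysis/passes/printf"
	"golang.org/x/tools/go/analysis/passes/shadow"
	"golang.org/x/tools/go/analysis/unitchecker"
)

func main() {
	// The go command invokes this binary once per package,
	// passing a JSON .cfg file describing the compilation unit.
	unitchecker.Main(printf.Analyzer, shadow.Analyzer)
}
```

Built and invoked as `go vet -vettool=$(which myvet) ./...`, the go command supplies the per-package `.cfg` files, export data, and fact files.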
+
+import (
+	"encoding/gob"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/importer"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/internal/analysisflags"
+	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/facts"
+	"golang.org/x/tools/internal/versions"
+)
+
+// A Config describes a compilation unit to be analyzed.
+// It is provided to the tool in a JSON-encoded file
+// whose name ends with ".cfg".
+type Config struct {
+	ID                        string // e.g. "fmt [fmt.test]"
+	Compiler                  string // gc or gccgo, provided to MakeImporter
+	Dir                       string // (unused)
+	ImportPath                string // package path
+	GoVersion                 string // minimum required Go version, such as "go1.21.0"
+	GoFiles                   []string
+	NonGoFiles                []string
+	IgnoredFiles              []string
+	ModulePath                string            // module path
+	ModuleVersion             string            // module version
+	ImportMap                 map[string]string // maps import path to package path
+	PackageFile               map[string]string // maps package path to file of type information
+	Standard                  map[string]bool   // package belongs to standard library
+	PackageVetx               map[string]string // maps package path to file of fact information
+	VetxOnly                  bool              // run analysis only for facts, not diagnostics
+	VetxOutput                string            // where to write file of fact information
+	SucceedOnTypecheckFailure bool
+}
+
+// Main is the main function of a vet-like analysis tool that must be
+// invoked by a build system to analyze a single package.
+//
+// The protocol required by 'go vet -vettool=...' is that the tool must support:
+//
+//	-flags          describe flags in JSON
+//	-V=full         describe executable for build caching
+//	foo.cfg         perform separate modular analysis on the single
+//	                unit described by a JSON config file foo.cfg.
+func Main(analyzers ...*analysis.Analyzer) {
+	progname := filepath.Base(os.Args[0])
+	log.SetFlags(0)
+	log.SetPrefix(progname + ": ")
+
+	if err := analysis.Validate(analyzers); err != nil {
+		log.Fatal(err)
+	}
+
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, `%[1]s is a tool for static analysis of Go programs.
+
+Usage of %[1]s:
+	%.16[1]s unit.cfg	# execute analysis specified by config file
+	%.16[1]s help	# general help, including listing analyzers and flags
+	%.16[1]s help name	# help on specific analyzer and its flags
+`, progname)
+		os.Exit(1)
+	}
+
+	analyzers = analysisflags.Parse(analyzers, true)
+
+	args := flag.Args()
+	if len(args) == 0 {
+		flag.Usage()
+	}
+	if args[0] == "help" {
+		analysisflags.Help(progname, analyzers, args[1:])
+		os.Exit(0)
+	}
+	if len(args) != 1 || !strings.HasSuffix(args[0], ".cfg") {
+		log.Fatalf(`invoking "go tool vet" directly is unsupported; use "go vet"`)
+	}
+	Run(args[0], analyzers)
+}
+
+// Run reads the *.cfg file, runs the analysis,
+// and calls os.Exit with an appropriate error code.
+// It assumes flags have already been set.
+func Run(configFile string, analyzers []*analysis.Analyzer) {
+	cfg, err := readConfig(configFile)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fset := token.NewFileSet()
+	results, err := run(fset, cfg, analyzers)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// In VetxOnly mode, the analysis is run only for facts.
+	if !cfg.VetxOnly {
+		if analysisflags.JSON {
+			// JSON output
+			tree := make(analysisflags.JSONTree)
+			for _, res := range results {
+				tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err)
+			}
+			tree.Print()
+		} else {
+			// plain text
+			exit := 0
+			for _, res := range results {
+				if res.err != nil {
+					log.Println(res.err)
+					exit = 1
+				}
+			}
+			for _, res := range results {
+				for _, diag := range res.diagnostics {
+					analysisflags.PrintPlain(fset, diag)
+					exit = 1
+				}
+			}
+			os.Exit(exit)
+		}
+	}
+
+	os.Exit(0)
+}
+
+func readConfig(filename string) (*Config, error) {
+	data, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	cfg := new(Config)
+	if err := json.Unmarshal(data, cfg); err != nil {
+		return nil, fmt.Errorf("cannot decode JSON config file %s: %v", filename, err)
+	}
+	if len(cfg.GoFiles) == 0 {
+		// The go command disallows packages with no files.
+		// The only exception is unsafe, but the go command
+		// doesn't call vet on it.
+		return nil, fmt.Errorf("package has no files: %s", cfg.ImportPath)
+	}
+	return cfg, nil
+}
+
+type factImporter = func(pkgPath string) ([]byte, error)
+
+// These four hook variables are a proof of concept of a future
+// parameterization of a unitchecker API that allows the client to
+// determine how and where facts and types are produced and consumed.
+// (Note that the eventual API will likely be quite different.)
+//
+// The defaults honor a Config in a manner compatible with 'go vet'.
+var (
+	makeTypesImporter = func(cfg *Config, fset *token.FileSet) types.Importer {
+		compilerImporter := importer.ForCompiler(fset, cfg.Compiler, func(path string) (io.ReadCloser, error) {
+			// path is a resolved package path, not an import path.
+			file, ok := cfg.PackageFile[path]
+			if !ok {
+				if cfg.Compiler == "gccgo" && cfg.Standard[path] {
+					return nil, nil // fall back to default gccgo lookup
+				}
+				return nil, fmt.Errorf("no package file for %q", path)
+			}
+			return os.Open(file)
+		})
+		return importerFunc(func(importPath string) (*types.Package, error) {
+			path, ok := cfg.ImportMap[importPath] // resolve vendoring, etc
+			if !ok {
+				return nil, fmt.Errorf("can't resolve import %q", importPath)
+			}
+			return compilerImporter.Import(path)
+		})
+	}
+
+	exportTypes = func(*Config, *token.FileSet, *types.Package) error {
+		// By default this is a no-op, because "go vet"
+		// makes the compiler produce type information.
+		return nil
+	}
+
+	makeFactImporter = func(cfg *Config) factImporter {
+		return func(pkgPath string) ([]byte, error) {
+			if vetx, ok := cfg.PackageVetx[pkgPath]; ok {
+				return os.ReadFile(vetx)
+			}
+			return nil, nil // no .vetx file, no facts
+		}
+	}
+
+	exportFacts = func(cfg *Config, data []byte) error {
+		return os.WriteFile(cfg.VetxOutput, data, 0666)
+	}
+)
+
+func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]result, error) {
+	// Load, parse, typecheck.
+	var files []*ast.File
+	for _, name := range cfg.GoFiles {
+		f, err := parser.ParseFile(fset, name, nil, parser.ParseComments)
+		if err != nil {
+			if cfg.SucceedOnTypecheckFailure {
+				// Silently succeed; let the compiler
+				// report parse errors.
+				err = nil
+			}
+			return nil, err
+		}
+		files = append(files, f)
+	}
+	tc := &types.Config{
+		Importer:  makeTypesImporter(cfg, fset),
+		Sizes:     types.SizesFor("gc", build.Default.GOARCH), // TODO(adonovan): use cfg.Compiler
+		GoVersion: cfg.GoVersion,
+	}
+	info := &types.Info{
+		Types:      make(map[ast.Expr]types.TypeAndValue),
+		Defs:       make(map[*ast.Ident]types.Object),
+		Uses:       make(map[*ast.Ident]types.Object),
+		Implicits:  make(map[ast.Node]types.Object),
+		Instances:  make(map[*ast.Ident]types.Instance),
+		Scopes:     make(map[ast.Node]*types.Scope),
+		Selections: make(map[*ast.SelectorExpr]*types.Selection),
+	}
+	versions.InitFileVersions(info)
+
+	pkg, err := tc.Check(cfg.ImportPath, fset, files, info)
+	if err != nil {
+		if cfg.SucceedOnTypecheckFailure {
+			// Silently succeed; let the compiler
+			// report type errors.
+			err = nil
+		}
+		return nil, err
+	}
+
+	// Register fact types with gob.
+	// In VetxOnly mode, analyzers are only for their facts,
+	// so we can skip any analysis that neither produces facts
+	// nor depends on any analysis that produces facts.
+	//
+	// TODO(adonovan): fix: the comment (and logic!) here are backwards.
+	// It should say "...nor is required by any...". (Issue 443099)
+	//
+	// Also build a map to hold working state and result.
+	type action struct {
+		once        sync.Once
+		result      interface{}
+		err         error
+		usesFacts   bool // (transitively uses)
+		diagnostics []analysis.Diagnostic
+	}
+	actions := make(map[*analysis.Analyzer]*action)
+	var registerFacts func(a *analysis.Analyzer) bool
+	registerFacts = func(a *analysis.Analyzer) bool {
+		act, ok := actions[a]
+		if !ok {
+			act = new(action)
+			var usesFacts bool
+			for _, f := range a.FactTypes {
+				usesFacts = true
+				gob.Register(f)
+			}
+			for _, req := range a.Requires {
+				if registerFacts(req) {
+					usesFacts = true
+				}
+			}
+			act.usesFacts = usesFacts
+			actions[a] = act
+		}
+		return act.usesFacts
+	}
+	var filtered []*analysis.Analyzer
+	for _, a := range analyzers {
+		if registerFacts(a) || !cfg.VetxOnly {
+			filtered = append(filtered, a)
+		}
+	}
+	analyzers = filtered
+
+	// Read facts from imported packages.
+	facts, err := facts.NewDecoder(pkg).Decode(makeFactImporter(cfg))
+	if err != nil {
+		return nil, err
+	}
+
+	// In parallel, execute the DAG of analyzers.
+	var exec func(a *analysis.Analyzer) *action
+	var execAll func(analyzers []*analysis.Analyzer)
+	exec = func(a *analysis.Analyzer) *action {
+		act := actions[a]
+		act.once.Do(func() {
+			execAll(a.Requires) // prefetch dependencies in parallel
+
+			// The inputs to this analysis are the
+			// results of its prerequisites.
+			inputs := make(map[*analysis.Analyzer]interface{})
+			var failed []string
+			for _, req := range a.Requires {
+				reqact := exec(req)
+				if reqact.err != nil {
+					failed = append(failed, req.String())
+					continue
+				}
+				inputs[req] = reqact.result
+			}
+
+			// Report an error if any dependency failed.
+ if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + factFilter := make(map[reflect.Type]bool) + for _, f := range a.FactTypes { + factFilter[reflect.TypeOf(f)] = true + } + + module := &analysis.Module{ + Path: cfg.ModulePath, + Version: cfg.ModuleVersion, + GoVersion: cfg.GoVersion, + } + + pass := &analysis.Pass{ + Analyzer: a, + Fset: fset, + Files: files, + OtherFiles: cfg.NonGoFiles, + IgnoredFiles: cfg.IgnoredFiles, + Pkg: pkg, + TypesInfo: info, + TypesSizes: tc.Sizes, + TypeErrors: nil, // unitchecker doesn't RunDespiteErrors + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: facts.ImportObjectFact, + ExportObjectFact: facts.ExportObjectFact, + AllObjectFacts: func() []analysis.ObjectFact { return facts.AllObjectFacts(factFilter) }, + ImportPackageFact: facts.ImportPackageFact, + ExportPackageFact: facts.ExportPackageFact, + AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) }, + Module: module, + } + pass.ReadFile = analysisinternal.MakeReadFile(pass) + + t0 := time.Now() + act.result, act.err = a.Run(pass) + + if act.err == nil { // resolve URLs on diagnostics. + for i := range act.diagnostics { + if url, uerr := analysisflags.ResolveURL(a, act.diagnostics[i]); uerr == nil { + act.diagnostics[i].URL = url + } else { + act.err = uerr // keep the last error + } + } + } + if false { + log.Printf("analysis %s = %s", pass, time.Since(t0)) + } + }) + return act + } + execAll = func(analyzers []*analysis.Analyzer) { + var wg sync.WaitGroup + for _, a := range analyzers { + wg.Add(1) + go func(a *analysis.Analyzer) { + _ = exec(a) + wg.Done() + }(a) + } + wg.Wait() + } + + execAll(analyzers) + + // Return diagnostics and errors from root analyzers. + results := make([]result, len(analyzers)) + for i, a := range analyzers { + act := actions[a] + results[i].a = a + results[i].err = act.err + results[i].diagnostics = act.diagnostics + } + + data := facts.Encode() + if err := exportFacts(cfg, data); err != nil { + return nil, fmt.Errorf("failed to export analysis facts: %v", err) + } + if err := exportTypes(cfg, fset, pkg); err != nil { + return nil, fmt.Errorf("failed to export type information: %v", err) + } + + return results, nil +} + +type result struct { + a *analysis.Analyzer + diagnostics []analysis.Diagnostic + err error +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go new file mode 100644 index 00000000000..4f2c4045622 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -0,0 +1,137 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import ( + "fmt" + "reflect" + "strings" + "unicode" +) + +// Validate reports an error if any of the analyzers are misconfigured. +// Checks include: +// that the name is a valid identifier; +// that the Doc is not empty; +// that the Run is non-nil; +// that the Requires graph is acyclic; +// that analyzer fact types are unique; +// that each fact type is a pointer. +// +// Analyzer names need not be unique, though this may be confusing. 
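For illustration, a toy misconfiguration of the kind Validate (below) rejects, here a `Requires` cycle between two hypothetical analyzers:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/analysis"
)

func main() {
	run := func(*analysis.Pass) (interface{}, error) { return nil, nil }
	a := &analysis.Analyzer{Name: "a", Doc: "toy analyzer a", Run: run}
	b := &analysis.Analyzer{Name: "b", Doc: "toy analyzer b", Run: run}
	a.Requires = []*analysis.Analyzer{b}
	b.Requires = []*analysis.Analyzer{a} // a -> b -> a: not a DAG

	err := analysis.Validate([]*analysis.Analyzer{a, b})
	fmt.Println(err)
	// "cycle detected involving the following analyzers: a b"
	// (name order may vary; the error is a *CycleInRequiresGraphError)
}
```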
+func Validate(analyzers []*Analyzer) error { + // Map each fact type to its sole generating analyzer. + factTypes := make(map[reflect.Type]*Analyzer) + + // Traverse the Requires graph, depth first. + const ( + white = iota + grey + black + finished + ) + color := make(map[*Analyzer]uint8) + var visit func(a *Analyzer) error + visit = func(a *Analyzer) error { + if a == nil { + return fmt.Errorf("nil *Analyzer") + } + if color[a] == white { + color[a] = grey + + // names + if !validIdent(a.Name) { + return fmt.Errorf("invalid analyzer name %q", a) + } + + if a.Doc == "" { + return fmt.Errorf("analyzer %q is undocumented", a) + } + + if a.Run == nil { + return fmt.Errorf("analyzer %q has nil Run", a) + } + // fact types + for _, f := range a.FactTypes { + if f == nil { + return fmt.Errorf("analyzer %s has nil FactType", a) + } + t := reflect.TypeOf(f) + if prev := factTypes[t]; prev != nil { + return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", + t, a, prev) + } + if t.Kind() != reflect.Ptr { + return fmt.Errorf("%s: fact type %s is not a pointer", a, t) + } + factTypes[t] = a + } + + // recursion + for _, req := range a.Requires { + if err := visit(req); err != nil { + return err + } + } + color[a] = black + } + + if color[a] == grey { + stack := []*Analyzer{a} + inCycle := map[string]bool{} + for len(stack) > 0 { + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if color[current] == grey && !inCycle[current.Name] { + inCycle[current.Name] = true + stack = append(stack, current.Requires...) + } + } + return &CycleInRequiresGraphError{AnalyzerNames: inCycle} + } + + return nil + } + for _, a := range analyzers { + if err := visit(a); err != nil { + return err + } + } + + // Reject duplicates among analyzers. + // Precondition: color[a] == black. + // Postcondition: color[a] == finished. + for _, a := range analyzers { + if color[a] == finished { + return fmt.Errorf("duplicate analyzer: %s", a.Name) + } + color[a] = finished + } + + return nil +} + +func validIdent(name string) bool { + for i, r := range name { + if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) { + return false + } + } + return name != "" +} + +type CycleInRequiresGraphError struct { + AnalyzerNames map[string]bool +} + +func (e *CycleInRequiresGraphError) Error() string { + var b strings.Builder + b.WriteString("cycle detected involving the following analyzers:") + for n := range e.AnalyzerNames { + b.WriteByte(' ') + b.WriteString(n) + } + return b.String() +} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go new file mode 100644 index 00000000000..e0b13e70a01 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -0,0 +1,513 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisinternal provides gopls' internal analyses with a +// number of helper functions that operate on typed syntax trees. +package analysisinternal + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + pathpkg "path" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/aliases" +) + +func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { + // Get the end position for the type error. 
+ offset, end := fset.PositionFor(start, false).Offset, start + if offset >= len(src) { + return end + } + if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 { + end = start + token.Pos(width) + } + return end +} + +func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + // TODO(adonovan): think about generics, and also generic aliases. + under := aliases.Unalias(typ) + // Don't call Underlying unconditionally: although it removes + // Named and Alias, it also removes TypeParam. + if n, ok := under.(*types.Named); ok { + under = n.Underlying() + } + switch under := under.(type) { + case *types.Basic: + switch { + case under.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"} + case under.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"} + case under.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`} + default: + panic(fmt.Sprintf("unknown basic type %v", under)) + } + case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: + return ast.NewIdent("nil") + case *types.Struct: + texpr := TypeExpr(f, pkg, typ) // typ because we want the name here. + if texpr == nil { + return nil + } + return &ast.CompositeLit{ + Type: texpr, + } + } + return nil +} + +// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of +// analysisinternal.ZeroValue) +func IsZeroValue(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +// TypeExpr returns syntax for the specified type. References to +// named types from packages other than pkg are qualified by an appropriate +// package name, as defined by the import environment of file. 
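TypeExpr (below) hand-builds AST with this qualification rule; the public `go/types` API exposes the same relative-naming idea at the string level through a `Qualifier`, sketched here with throwaway packages:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Two throwaway packages; we render a type owned by pkgB.
	pkgA := types.NewPackage("example.org/a", "a")
	pkgB := types.NewPackage("example.org/b", "b")
	obj := types.NewTypeName(token.NoPos, pkgB, "Buffer", nil)
	named := types.NewNamed(obj, types.NewStruct(nil, nil), nil)
	ptr := types.NewPointer(named)

	// Foreign package: the name is qualified (by path, with RelativeTo).
	fmt.Println(types.TypeString(ptr, types.RelativeTo(pkgA))) // *example.org/b.Buffer
	// Own package: bare name, like TypeExpr's same-package case.
	fmt.Println(types.TypeString(ptr, types.RelativeTo(pkgB))) // *Buffer
}
```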
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + switch t := typ.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + case *types.Pointer: + x := TypeExpr(f, pkg, t.Elem()) + if x == nil { + return nil + } + return &ast.UnaryExpr{ + Op: token.MUL, + X: x, + } + case *types.Array: + elt := TypeExpr(f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: elt, + } + case *types.Slice: + elt := TypeExpr(f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Elt: elt, + } + case *types.Map: + key := TypeExpr(f, pkg, t.Key()) + value := TypeExpr(f, pkg, t.Elem()) + if key == nil || value == nil { + return nil + } + return &ast.MapType{ + Key: key, + Value: value, + } + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + value := TypeExpr(f, pkg, t.Elem()) + if value == nil { + return nil + } + return &ast.ChanType{ + Dir: dir, + Value: value, + } + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + p := TypeExpr(f, pkg, t.Params().At(i).Type()) + if p == nil { + return nil + } + params = append(params, &ast.Field{ + Type: p, + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + if t.Variadic() { + last := params[len(params)-1] + last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + r := TypeExpr(f, pkg, t.Results().At(i).Type()) + if r == nil { + return nil + } + returns = append(returns, &ast.Field{ + Type: r, + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } + case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} + if t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } + if t.Obj().Pkg() == pkg { + return ast.NewIdent(t.Obj().Name()) + } + pkgName := t.Obj().Pkg().Name() + + // If the file already imports the package under another name, use that. + for _, cand := range f.Imports { + if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { + if cand.Name != nil && cand.Name.Name != "" { + pkgName = cand.Name.Name + } + } + } + if pkgName == "." { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + case *types.Struct: + return ast.NewIdent(t.String()) + case *types.Interface: + return ast.NewIdent(t.String()) + default: + return nil + } +} + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. +// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. +// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. 
+func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. + return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + case *ast.SwitchStmt, *ast.TypeSwitchStmt: + return expr.(ast.Stmt) + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. +func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} + +// WalkASTWithParent walks the AST rooted at n. The semantics are +// similar to ast.Inspect except it does not call f(nil). +func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { + var ancestors []ast.Node + ast.Inspect(n, func(n ast.Node) (recurse bool) { + if n == nil { + ancestors = ancestors[:len(ancestors)-1] + return false + } + + var parent ast.Node + if len(ancestors) > 0 { + parent = ancestors[len(ancestors)-1] + } + ancestors = append(ancestors, n) + return f(n, parent) + }) +} + +// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types. +// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within +// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that +// is unrecognized. +func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string { + + // Initialize matches to contain the variable types we are searching for. + matches := make(map[types.Type][]string) + for _, typ := range typs { + if typ == nil { + continue // TODO(adonovan): is this reachable? + } + matches[typ] = nil // create entry + } + + seen := map[types.Object]struct{}{} + ast.Inspect(node, func(n ast.Node) bool { + if n == nil { + return false + } + // Prevent circular definitions. If 'pos' is within an assignment statement, do not + // allow any identifiers in that assignment statement to be selected. 
Otherwise, + // we could do the following, where 'x' satisfies the type of 'f0': + // + // x := fakeStruct{f0: x} + // + if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() { + return false + } + if n.End() > pos { + return n.Pos() <= pos + } + ident, ok := n.(*ast.Ident) + if !ok || ident.Name == "_" { + return true + } + obj := info.Defs[ident] + if obj == nil || obj.Type() == nil { + return true + } + if _, ok := obj.(*types.TypeName); ok { + return true + } + // Prevent duplicates in matches' values. + if _, ok = seen[obj]; ok { + return true + } + seen[obj] = struct{}{} + // Find the scope for the given position. Then, check whether the object + // exists within the scope. + innerScope := pkg.Scope().Innermost(pos) + if innerScope == nil { + return true + } + _, foundObj := innerScope.LookupParent(ident.Name, pos) + if foundObj != obj { + return true + } + // The object must match one of the types that we are searching for. + // TODO(adonovan): opt: use typeutil.Map? + if names, ok := matches[obj.Type()]; ok { + matches[obj.Type()] = append(names, ident.Name) + } else { + // If the object type does not exactly match + // any of the target types, greedily find the first + // target type that the object type can satisfy. + for typ := range matches { + if equivalentTypes(obj.Type(), typ) { + matches[typ] = append(matches[typ], ident.Name) + } + } + } + return true + }) + return matches +} + +func equivalentTypes(want, got types.Type) bool { + if types.Identical(want, got) { + return true + } + // Code segment to help check for untyped equality from (golang/go#32146). + if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { + if lhs, ok := got.Underlying().(*types.Basic); ok { + return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType + } + } + return types.AssignableTo(want, got) +} + +// MakeReadFile returns a simple implementation of the Pass.ReadFile function. +func MakeReadFile(pass *analysis.Pass) func(filename string) ([]byte, error) { + return func(filename string) ([]byte, error) { + if err := CheckReadable(pass, filename); err != nil { + return nil, err + } + return os.ReadFile(filename) + } +} + +// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. +func CheckReadable(pass *analysis.Pass, filename string) error { + if slicesContains(pass.OtherFiles, filename) || + slicesContains(pass.IgnoredFiles, filename) { + return nil + } + for _, f := range pass.Files { + // TODO(adonovan): use go1.20 f.FileStart + if pass.Fset.File(f.Pos()).Name() == filename { + return nil + } + } + return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) +} + +// TODO(adonovan): use go1.21 slices.Contains. +func slicesContains[S ~[]E, E comparable](slice S, x E) bool { + for _, elem := range slice { + if elem == x { + return true + } + } + return false +} + +// AddImport checks whether this file already imports pkgpath and +// that import is in scope at pos. If so, it returns the name under +// which it was imported and a zero edit. Otherwise, it adds a new +// import of pkgpath, using a name derived from the preferred name, +// and returns the chosen name along with the edit for the new import. +// +// It does not mutate its arguments. +func AddImport(info *types.Info, file *ast.File, pos token.Pos, pkgpath, preferredName string) (name string, newImport []analysis.TextEdit) { + // Find innermost enclosing lexical block. 
+ scope := info.Scopes[file].Innermost(pos) + if scope == nil { + panic("no enclosing lexical block") + } + + // Is there an existing import of this package? + // If so, are we in its scope? (not shadowed) + for _, spec := range file.Imports { + pkgname, ok := importedPkgName(info, spec) + if ok && pkgname.Imported().Path() == pkgpath { + if _, obj := scope.LookupParent(pkgname.Name(), pos); obj == pkgname { + return pkgname.Name(), nil + } + } + } + + // We must add a new import. + // Ensure we have a fresh name. + newName := preferredName + for i := 0; ; i++ { + if _, obj := scope.LookupParent(newName, pos); obj == nil { + break // fresh + } + newName = fmt.Sprintf("%s%d", preferredName, i) + } + + // For now, keep it real simple: create a new import + // declaration before the first existing declaration (which + // must exist), including its comments, and let goimports tidy it up. + // + // Use a renaming import whenever the preferred name is not + // available, or the chosen name does not match the last + // segment of its path. + newText := fmt.Sprintf("import %q\n\n", pkgpath) + if newName != preferredName || newName != pathpkg.Base(pkgpath) { + newText = fmt.Sprintf("import %s %q\n\n", newName, pkgpath) + } + decl0 := file.Decls[0] + var before ast.Node = decl0 + switch decl0 := decl0.(type) { + case *ast.GenDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + case *ast.FuncDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + } + return newName, []analysis.TextEdit{{ + Pos: before.Pos(), + End: before.Pos(), + NewText: []byte(newText), + }} +} + +// importedPkgName returns the PkgName object declared by an ImportSpec. +// TODO(adonovan): use go1.22's Info.PkgNameOf. +func importedPkgName(info *types.Info, imp *ast.ImportSpec) (*types.PkgName, bool) { + var obj types.Object + if imp.Name != nil { + obj = info.Defs[imp.Name] + } else { + obj = info.Implicits[imp] + } + pkgname, ok := obj.(*types.PkgName) + return pkgname, ok +} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go new file mode 100644 index 00000000000..39507723d3d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go @@ -0,0 +1,113 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisinternal + +import ( + "fmt" + "go/parser" + "go/token" + "strings" +) + +// MustExtractDoc is like [ExtractDoc] but it panics on error. +// +// To use, define a doc.go file such as: +// +// // Package halting defines an analyzer of program termination. +// // +// // # Analyzer halting +// // +// // halting: reports whether execution will halt. +// // +// // The halting analyzer reports a diagnostic for functions +// // that run forever. To suppress the diagnostics, try inserting +// // a 'break' statement into each loop. +// package halting +// +// import _ "embed" +// +// //go:embed doc.go +// var doc string +// +// And declare your analyzer as: +// +// var Analyzer = &analysis.Analyzer{ +// Name: "halting", +// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// ... +// } +func MustExtractDoc(content, name string) string { + doc, err := ExtractDoc(content, name) + if err != nil { + panic(err) + } + return doc +} + +// ExtractDoc extracts a section of a package doc comment from the +// provided contents of an analyzer package's doc.go file. 
+//
+// A section is a portion of the comment between one heading and
+// the next, using this form:
+//
+//	# Analyzer NAME
+//
+//	NAME: SUMMARY
+//
+//	Full description...
+//
+// where NAME matches the name argument, and SUMMARY is a brief
+// verb-phrase that describes the analyzer. The following lines, up
+// until the next heading or the end of the comment, contain the full
+// description. ExtractDoc returns the portion following the colon,
+// which is the form expected by Analyzer.Doc.
+//
+// Example:
+//
+//	# Analyzer printf
+//
+//	printf: checks consistency of calls to printf
+//
+//	The printf analyzer checks consistency of calls to printf.
+//	Here is the complete description...
+//
+// This notation allows a single doc comment to provide documentation
+// for multiple analyzers, each in its own section.
+// The HTML anchors generated for each heading are predictable.
+//
+// It returns an error if the content was not a valid Go source file
+// containing a package doc comment with a heading of the required
+// form.
+//
+// This machinery enables the package documentation (typically
+// accessible via the web at https://pkg.go.dev/) and the command
+// documentation (typically printed to a terminal) to be derived from
+// the same source and formatted appropriately.
+func ExtractDoc(content, name string) (string, error) {
+	if content == "" {
+		return "", fmt.Errorf("empty Go source file")
+	}
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "", content, parser.ParseComments|parser.PackageClauseOnly)
+	if err != nil {
+		return "", fmt.Errorf("not a Go source file")
+	}
+	if f.Doc == nil {
+		return "", fmt.Errorf("Go source file has no package doc comment")
+	}
+	for _, section := range strings.Split(f.Doc.Text(), "\n# ") {
+		if body := strings.TrimPrefix(section, "Analyzer "+name); body != section &&
+			body != "" &&
+			(body[0] == '\r' || body[0] == '\n') {
+			body = strings.TrimSpace(body)
+			rest := strings.TrimPrefix(body, name+":")
+			if rest == body {
+				return "", fmt.Errorf("'Analyzer %s' heading not followed by '%s: summary...' line", name, name)
+			}
+			return strings.TrimSpace(rest), nil
+		}
+	}
+	return "", fmt.Errorf("package doc comment contains no 'Analyzer %s' heading", name)
+}
diff --git a/vendor/golang.org/x/tools/internal/diff/diff.go b/vendor/golang.org/x/tools/internal/diff/diff.go
new file mode 100644
index 00000000000..a13547b7a7e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/diff.go
@@ -0,0 +1,176 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff computes differences between text files or strings.
+package diff
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// An Edit describes the replacement of a portion of a text file.
+type Edit struct {
+	Start, End int    // byte offsets of the region to replace
+	New        string // the replacement
+}
+
+func (e Edit) String() string {
+	return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New)
+}
+
+// Apply applies a sequence of edits to the src buffer and returns the
+// result. Edits are applied in order of start offset; edits with the
+// same start offset are applied in the order they were provided.
+//
+// Apply returns an error if any edit is out of bounds,
+// or if any pair of edits is overlapping.
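Because `diff` is internal to x/tools it is importable only within that module, so the following is illustrative: an in-module example test exercising `Apply` (defined next), with edits applied by start offset:

```go
package diff_test // only buildable inside x/tools, since diff is internal

import (
	"fmt"

	"golang.org/x/tools/internal/diff"
)

func ExampleApply() {
	src := "hello world"
	edits := []diff.Edit{
		{Start: 6, End: 11, New: "Go"}, // replace "world"
		{Start: 5, End: 5, New: ","},   // pure insertion at offset 5
	}
	out, _ := diff.Apply(src, edits)
	fmt.Println(out)
	// Output: hello, Go
}
```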
+func Apply(src string, edits []Edit) (string, error) {
+ edits, size, err := validate(src, edits)
+ if err != nil {
+ return "", err
+ }
+
+ // Apply edits.
+ out := make([]byte, 0, size)
+ lastEnd := 0
+ for _, edit := range edits {
+ if lastEnd < edit.Start {
+ out = append(out, src[lastEnd:edit.Start]...)
+ }
+ out = append(out, edit.New...)
+ lastEnd = edit.End
+ }
+ out = append(out, src[lastEnd:]...)
+
+ if len(out) != size {
+ panic("wrong size")
+ }
+
+ return string(out), nil
+}
+
+// ApplyBytes is like Apply, but it accepts a byte slice.
+// The result is always a new slice.
+func ApplyBytes(src []byte, edits []Edit) ([]byte, error) {
+ res, err := Apply(string(src), edits)
+ return []byte(res), err
+}
+
+// validate checks that edits are consistent with src,
+// and returns the size of the patched output.
+// It may return a different slice (a sorted copy of the input).
+func validate(src string, edits []Edit) ([]Edit, int, error) {
+ if !sort.IsSorted(editsSort(edits)) {
+ edits = append([]Edit(nil), edits...)
+ SortEdits(edits)
+ }
+
+ // Check validity of edits and compute final size.
+ size := len(src)
+ lastEnd := 0
+ for _, edit := range edits {
+ if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) {
+ return nil, 0, fmt.Errorf("diff has out-of-bounds edits")
+ }
+ if edit.Start < lastEnd {
+ return nil, 0, fmt.Errorf("diff has overlapping edits")
+ }
+ size += len(edit.New) + edit.Start - edit.End
+ lastEnd = edit.End
+ }
+
+ return edits, size, nil
+}
+
+// SortEdits orders a slice of Edits by (start, end) offset.
+// This ordering puts insertions (end = start) before deletions
+// (end > start) at the same point, but uses a stable sort to preserve
+// the order of multiple insertions at the same point.
+// (Apply detects multiple deletions at the same point as an error.)
+func SortEdits(edits []Edit) {
+ sort.Stable(editsSort(edits))
+}
+
+type editsSort []Edit
+
+func (a editsSort) Len() int { return len(a) }
+func (a editsSort) Less(i, j int) bool {
+ if cmp := a[i].Start - a[j].Start; cmp != 0 {
+ return cmp < 0
+ }
+ return a[i].End < a[j].End
+}
+func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// lineEdits expands and merges a sequence of edits so that each
+// resulting edit replaces one or more complete lines.
+// See Apply for preconditions.
+func lineEdits(src string, edits []Edit) ([]Edit, error) {
+ edits, _, err := validate(src, edits)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do all deletions begin and end at the start of a line,
+ // and all insertions end with a newline?
+ // (This is merely a fast path.)
+ for _, edit := range edits {
+ if edit.Start >= len(src) || // insertion at EOF
+ edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start
+ edit.End > 0 && src[edit.End-1] != '\n' || // not at line start
+ edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert
+ goto expand // slow path
+ }
+ }
+ return edits, nil // aligned
+
+expand:
+ if len(edits) == 0 {
+ return edits, nil // no edits (unreachable due to fast path)
+ }
+ expanded := make([]Edit, 0, len(edits)) // a guess
+ prev := edits[0]
+ // TODO(adonovan): opt: start from the first misaligned edit.
+ // TODO(adonovan): opt: avoid quadratic cost of string += string.
+ for _, edit := range edits[1:] {
+ between := src[prev.End:edit.Start]
+ if !strings.Contains(between, "\n") {
+ // overlapping lines: combine with previous edit.
+ prev.New += between + edit.New
+ prev.End = edit.End
+ } else {
+ // non-overlapping lines: flush previous edit.
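+ // (Editor's note: "overlapping" here means the gap between the two
+ // edits contains no newline, so after expansion they would cover the
+ // same line and must be merged.)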
+ expanded = append(expanded, expandEdit(prev, src))
+ prev = edit
+ }
+ }
+ return append(expanded, expandEdit(prev, src)), nil // flush final edit
+}
+
+// expandEdit returns edit expanded to complete whole lines.
+func expandEdit(edit Edit, src string) Edit {
+ // Expand start left to start of line.
+ // (delta is the zero-based column number of start.)
+ start := edit.Start
+ if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
+ edit.Start -= delta
+ edit.New = src[start-delta:start] + edit.New
+ }
+
+ // Expand end right to end of line.
+ end := edit.End
+ if end > 0 && src[end-1] != '\n' ||
+ edit.New != "" && edit.New[len(edit.New)-1] != '\n' {
+ if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
+ edit.End = len(src) // extend to EOF
+ } else {
+ edit.End = end + nl + 1 // extend beyond \n
+ }
+ }
+ edit.New += src[end:edit.End]
+
+ return edit
+}
diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/common.go b/vendor/golang.org/x/tools/internal/diff/lcs/common.go
new file mode 100644
index 00000000000..c3e82dd2683
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/lcs/common.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "log"
+ "sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+ X, Y int
+ Len int
+}
+
+// sort sorts in place, by lowest X, and if tied, inversely by Len
+func (l lcs) sort() lcs {
+ sort.Slice(l, func(i, j int) bool {
+ if l[i].X != l[j].X {
+ return l[i].X < l[j].X
+ }
+ return l[i].Len > l[j].Len
+ })
+ return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+ for i := 1; i < len(l); i++ {
+ if l[i-1].X+l[i-1].Len > l[i].X {
+ return false
+ }
+ if l[i-1].Y+l[i-1].Len > l[i].Y {
+ return false
+ }
+ }
+ return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+ // from the set of diagonals in l, find a maximal non-conflicting set
+ // this problem may be NP-complete, but we use a greedy heuristic,
+ // which is quadratic, but with a better data structure, could be D log D.
+ // independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
+ // which has to have monotone x and y
+ if len(l) == 0 {
+ return nil
+ }
+ sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
+ tmp := make(lcs, 0, len(l))
+ tmp = append(tmp, l[0])
+ for i := 1; i < len(l); i++ {
+ var dir direction
+ nxt := l[i]
+ for _, in := range tmp {
+ if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
+ break
+ }
+ }
+ if nxt.Len > 0 && dir != bad {
+ tmp = append(tmp, nxt)
+ }
+ }
+ tmp.sort()
+ if false && !tmp.valid() { // debug checking
+ log.Fatalf("here %d", len(tmp))
+ }
+ return tmp
+}
+
+type direction int
+
+const (
+ empty direction = iota // diag is empty (so not in lcs)
+ leftdown // proposed diag is acceptably to the left and below
+ rightup // proposed diag is acceptably to the right and above
+ bad // proposed diag is inconsistent with the lcs so far
+)
+
+// overlap trims the proposed diag prop so it doesn't overlap with
+// the existing diag that has already been added to the lcs.
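+//
+// For example (editor's sketch): with exist = diag{X:0, Y:0, Len:3} and
+// prop = diag{X:2, Y:5, Len:3}, the X ranges overlap, so prop is trimmed
+// to diag{X:3, Y:6, Len:2} and reported as rightup.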
+func overlap(exist, prop diag) (direction, diag) {
+ if prop.X <= exist.X && exist.X < prop.X+prop.Len {
+ // remove the end of prop where it overlaps with the X end of exist
+ delta := prop.X + prop.Len - exist.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.X <= prop.X && prop.X < exist.X+exist.Len {
+ // remove the beginning of prop where it overlaps with exist
+ delta := exist.X + exist.Len - prop.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta
+ prop.Y += delta
+ }
+ if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
+ // remove the end of prop that overlaps (in Y) with exist
+ delta := prop.Y + prop.Len - exist.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
+ // remove the beginning of prop that overlaps (in Y) with exist
+ delta := exist.Y + exist.Len - prop.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta // no test reaches this code
+ prop.Y += delta
+ }
+ if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
+ return leftdown, prop
+ }
+ if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
+ return rightup, prop
+ }
+ // prop can't be in an lcs that contains exist
+ return bad, prop
+}
+
+// manipulating Diag and lcs
+
+// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
+// or to its first Diag. prepend is only called to extend diagonals
+// in the backward direction.
+func (lcs lcs) prepend(x, y int) lcs {
+ if len(lcs) > 0 {
+ d := &lcs[0]
+ if int(d.X) == x+1 && int(d.Y) == y+1 {
+ // extend the diagonal down and to the left
+ d.X, d.Y = int(x), int(y)
+ d.Len++
+ return lcs
+ }
+ }
+
+ r := diag{X: int(x), Y: int(y), Len: 1}
+ lcs = append([]diag{r}, lcs...)
+ return lcs
+}
+
+// append appends a diagonal, or extends the existing one,
+// by adding the edge (x,y)-(x+1,y+1). append is only called
+// to extend diagonals in the forward direction.
+func (lcs lcs) append(x, y int) lcs {
+ if len(lcs) > 0 {
+ last := &lcs[len(lcs)-1]
+ // Expand last element if adjoining.
+ if last.X+last.Len == x && last.Y+last.Len == y {
+ last.Len++
+ return lcs
+ }
+ }
+
+ return append(lcs, diag{X: x, Y: y, Len: 1})
+}
+
+// enforce constraint on d, k
+func ok(d, k int) bool {
+ return d >= 0 && -d <= k && k <= d
+}
diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/doc.go b/vendor/golang.org/x/tools/internal/diff/lcs/doc.go
new file mode 100644
index 00000000000..9029dd20b3d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/lcs/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lcs contains code to find longest-common-subsequences
+// (and diffs).
+package lcs
+
+/*
+Compute longest-common-subsequences of two slices A, B using
+algorithms from Myers' paper. A longest-common-subsequence
+(LCS from now on) of A and B is a maximal set of lexically increasing
+pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but
+they all have the same length. An LCS determines a sequence of edits
+that changes A into B.
+
+The key concept is the edit graph of A and B.
+If A has length N and B has length M, then the edit graph has
+vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a
+horizontal edge from v[i][j] to v[i+1][j] whenever both are in
+the graph, and a vertical edge from v[i][j] to v[i][j+1] similarly.
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba"
+ ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6)
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2)
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical
+to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected,
+there are 4 non-diagonal steps, and the diagonals form an LCS.
+
+There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon):
+A:"aabbaa", B:"aacaba"
+ ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+ ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙
+ b | | | ____/‾‾‾ | ____/‾‾‾ | | |
+ ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+(:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙
+ c | | | | | | |
+ ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+(:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙
+ a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ |
+(:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙
+ a a b b a a
+
+Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the
+front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short.
+We want to control how big D can be, by stopping when it gets too large. The forward algorithm then
+privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable
+asymmetry.
+
+Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in
+the edit graph look like.
+A:"aabbaa", B:"aacaba"
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙
+ b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | |
+ ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙
+ c | | | | | | |
+ ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a a b b a a
+
+The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion
+is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same
+diagonal. (Here the edit graph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward
+2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path.
+Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the
+computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed
+from the edit graph. The implementation looks for a number of special cases to try to avoid computing an extra forward path.
+
+If the two-sided algorithm has to stop early (because D has become too large) it will have found a forward LCS and a
+backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two
+computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS
+is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two
+to form a best-effort LCS. In the worst case the forward partial LCS may have to
+be recomputed.
+*/
+
+/* Eugene Myers' paper is titled
+"An O(ND) Difference Algorithm and Its Variations"
+and can be found at
+http://www.xmailserver.org/diff2.pdf
+
+(There is a generic implementation of the algorithm in the repository with git hash
+b9ad7e4ade3a686d608e44475390ad428e60e7fc)
+*/
diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/git.sh b/vendor/golang.org/x/tools/internal/diff/lcs/git.sh
new file mode 100644
index 00000000000..b25ba4aac74
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/lcs/git.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Creates a zip file containing all numbered versions
+# of the commit history of a large source file, for use
+# as input data for the tests of the diff algorithm.
+#
+# Run script from root of the x/tools repo.
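+#
+# (Editor's note: the script overwrites $file in the working tree while it
+# runs, via 'git checkout', and restores it at the end with 'git restore'.)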
+ +set -eu + +# WARNING: This script will install the latest version of $file +# The largest real source file in the x/tools repo. +# file=internal/golang/completion/completion.go +# file=internal/golang/diagnostics.go +file=internal/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/labels.go b/vendor/golang.org/x/tools/internal/diff/lcs/labels.go new file mode 100644 index 00000000000..504913d1da3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/vendor/golang.org/x/tools/internal/diff/lcs/old.go new file mode 100644 index 00000000000..4353da15ba9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// TODO(adonovan): remove unclear references to "old" in this package. + +import ( + "fmt" +) + +// A Diff is a replacement of a portion of A by a portion of B. +type Diff struct { + Start, End int // offsets of portion to delete in A + ReplStart, ReplEnd int // offset of replacement text in B +} + +// DiffStrings returns the differences between two strings. +// It does not respect rune boundaries. +func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } + +// DiffBytes returns the differences between two byte sequences. +// It does not respect rune boundaries. +func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } + +// DiffRunes returns the differences between two rune sequences. +func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) } + +func diff(seqs sequences) []Diff { + // A limit on how deeply the LCS algorithm should search. The value is just a guess. + const maxDiffs = 100 + diff, _ := compute(seqs, twosided, maxDiffs/2) + return diff +} + +// compute computes the list of differences between two sequences, +// along with the LCS. It is exercised directly by tests. 
+// The algorithm is one of {forward, backward, twosided}. +func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + alen, blen := seqs.lengths() + g := &editGraph{ + seqs: seqs, + vf: newtriang(limit), + vb: newtriang(limit), + limit: limit, + ux: alen, + uy: blen, + delta: alen - blen, + } + lcs := algo(g) + diffs := lcs.toDiffs(alen, blen) + return diffs, lcs +} + +// editGraph carries the information for computing the lcs of two sequences. +type editGraph struct { + seqs sequences + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// toDiffs converts an LCS to a list of edits. +func (lcs lcs) toDiffs(alen, blen int) []Diff { + var diffs []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X || pb < l.Y { + diffs = append(diffs, Diff{pa, l.X, pb, l.Y}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < alen || pb < blen { + diffs = append(diffs, Diff{pa, alen, pb, blen}) + } + return diffs +} + +// --- FORWARD --- + +// fdone decides if the forward path has reached the upper right +// corner of the rectangle. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. +func forward(e *editGraph) lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + ans = ans.prepend(x+e.lx-1, y+e.ly-1) + x-- + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y 
:= relx+e.lx, rely+e.ly + if x < e.ux && y < e.uy { + x += e.seqs.commonPrefixLen(x, e.ux, y, e.uy) + } + return x +} + +func (e *editGraph) setForward(d, k, relx int) { + x := e.lookForward(k, relx) + e.vf.set(d, k, x-e.lx) +} + +func (e *editGraph) getForward(d, k int) int { + x := e.vf.get(d, k) + return x +} + +// --- BACKWARD --- + +// bdone decides if the backward path has reached the lower left corner +func (e *editGraph) bdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vb.get(D, k) + y := x - (k + e.delta) + if x == 0 && y == 0 { + return true, e.backwardlcs(D, k) + } + return false, nil +} + +// run the backward algorithm, until success or up to the limit on D. +func backward(e *editGraph) lcs { + e.setBackward(0, 0, e.ux) + if ok, ans := e.bdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + if ok, ans := e.bdone(D+1, -(D + 1)); ok { + return ans + } + e.setBackward(D+1, D+1, e.getBackward(D, D)) + if ok, ans := e.bdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + if ok, ans := e.bdone(D+1, k); ok { + return ans + } + } + } + + // D is too large + // find the D path with minimal x+y inside the rectangle and + // use that to compute the part of the lcs found + kmax := -e.limit - 1 + diagmin := 1 << 25 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no paths when limit=%d?", e.limit)) + } + return e.backwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking +func (e *editGraph) backwardlcs(D, k int) lcs { + var ans lcs + for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; { + if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) { + // D--, k--, x unchanged + D, k = D-1, k-1 + continue + } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) { + // D--, k++, x++ + D, k, x = D-1, k+1, x+1 + continue + } + y := x - (k + e.delta) + ans = ans.append(x+e.lx, y+e.ly) + x++ + } + return ans +} + +// start at (x,y), go down the diagonal as far as possible, +func (e *editGraph) lookBackward(k, relx int) int { + rely := relx - (k + e.delta) // forward k = k + e.delta + x, y := relx+e.lx, rely+e.ly + if x > 0 && y > 0 { + x -= e.seqs.commonSuffixLen(0, x, 0, y) + } + return x +} + +// convert to rectangle, and label the result with d +func (e *editGraph) setBackward(d, k, relx int) { + x := e.lookBackward(k, relx) + e.vb.set(d, k, x-e.lx) +} + +func (e *editGraph) getBackward(d, k int) int { + x := e.vb.get(d, k) + return x +} + +// -- TWOSIDED --- + +func twosided(e *editGraph) lcs { + // The termination condition could be improved, as either the forward + // or backward pass could succeed before Myers' Lemma applies. + // Aside from questions of efficiency (is the extra testing cost-effective) + // this is more likely to matter when e.limit is reached. 
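+ //
+ // (Editor's note: the loop below alternates one forward pass with one
+ // backward pass, checking Myers' overlap condition after each pass.)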
+ e.setForward(0, 0, e.lx) + e.setBackward(0, 0, e.ux) + + // from D to D+1 + for D := 0; D < e.limit; D++ { + // just finished a backwards pass, so check + if got, ok := e.twoDone(D, D); ok { + return e.twolcs(D, D, got) + } + // do a forwards pass (D to D+1) + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + e.setForward(D+1, D+1, e.getForward(D, D)+1) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + } + // just did a forward pass, so check + if got, ok := e.twoDone(D+1, D); ok { + return e.twolcs(D+1, D, got) + } + // do a backward pass, D to D+1 + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + e.setBackward(D+1, D+1, e.getBackward(D, D)) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + } + } + + // D too large. combine a forward and backward partial lcs + // first, a forward one + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit)) + } + lcs := e.forwardlcs(e.limit, kmax) + // now a backward one + // find the D path with minimal x+y inside the rectangle and + // use that to compute the lcs + diagmin := 1 << 25 // infinity + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit)) + } + lcs = append(lcs, e.backwardlcs(e.limit, kmax)...) + // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs) + ans := lcs.fix() + return ans +} + +// Does Myers' Lemma apply? +func (e *editGraph) twoDone(df, db int) (int, bool) { + if (df+db+e.delta)%2 != 0 { + return 0, false // diagonals cannot overlap + } + kmin := -db + e.delta + if -df > kmin { + kmin = -df + } + kmax := db + e.delta + if df < kmax { + kmax = df + } + for k := kmin; k <= kmax; k += 2 { + x := e.vf.get(df, k) + u := e.vb.get(db, k-e.delta) + if u <= x { + // is it worth looking at all the other k? + for l := k; l <= kmax; l += 2 { + x := e.vf.get(df, l) + y := x - l + u := e.vb.get(db, l-e.delta) + v := u - l + if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux { + return l, true + } + } + return k, true + } + } + return 0, false +} + +func (e *editGraph) twolcs(df, db, kf int) lcs { + // db==df || db+1==df + x := e.vf.get(df, kf) + y := x - kf + kb := kf - e.delta + u := e.vb.get(db, kb) + v := u - kf + + // Myers proved there is a df-path from (0,0) to (u,v) + // and a db-path from (x,y) to (N,M). + // In the first case the overall path is the forward path + // to (u,v) followed by the backward path to (N,M). + // In the second case the path is the backward path to (x,y) + // followed by the forward path to (x,y) from (0,0). + + // Look for some special cases to avoid computing either of these paths. 
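+ // (Editor's note: the quoted pairs below, e.g. "babaab" "cccaba", are
+ // example inputs A and B that exercise the corresponding branch.)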
+ if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, forward(e)...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go new file mode 100644 index 00000000000..2d72d263043 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// This file defines the abstract sequence over which the LCS algorithm operates. + +// sequences abstracts a pair of sequences, A and B. +type sequences interface { + lengths() (int, int) // len(A), len(B) + commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj])) + commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) +} + +type stringSeqs struct{ a, b string } + +func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) +} +func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) +} + +// The explicit capacity in s[i:j:j] leads to more efficient code. 
+ +type bytesSeqs struct{ a, b []byte } + +func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type runesSeqs struct{ a, b []rune } + +func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +// TODO(adonovan): optimize these functions using ideas from: +// - https://go.dev/cl/408116 common.go +// - https://go.dev/cl/421435 xor_generic.go + +// TODO(adonovan): factor using generics when available, +// but measure performance impact. + +// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} + +// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} diff --git a/vendor/golang.org/x/tools/internal/diff/ndiff.go b/vendor/golang.org/x/tools/internal/diff/ndiff.go new file mode 100644 index 00000000000..fbef4d730c5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/ndiff.go @@ -0,0 +1,99 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "unicode/utf8" + + "golang.org/x/tools/internal/diff/lcs" +) + +// Strings computes the differences between two strings. +// The resulting edits respect rune boundaries. +func Strings(before, after string) []Edit { + if before == after { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + // TODO(adonovan): opt: specialize diffASCII for strings. + return diffASCII([]byte(before), []byte(after)) + } + return diffRunes([]rune(before), []rune(after)) +} + +// Bytes computes the differences between two byte slices. +// The resulting edits respect rune boundaries. +func Bytes(before, after []byte) []Edit { + if bytes.Equal(before, after) { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + return diffASCII(before, after) + } + return diffRunes(runes(before), runes(after)) +} + +func diffASCII(before, after []byte) []Edit { + diffs := lcs.DiffBytes(before, after) + + // Convert from LCS diffs. 
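+ // (Editor's note: a Diff records byte offsets Start/End into before and
+ // ReplStart/ReplEnd into after, so for ASCII input they can be reused
+ // directly as Edit offsets.)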
+ res := make([]Edit, len(diffs)) + for i, d := range diffs { + res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])} + } + return res +} + +func diffRunes(before, after []rune) []Edit { + diffs := lcs.DiffRunes(before, after) + + // The diffs returned by the lcs package use indexes + // into whatever slice was passed in. + // Convert rune offsets to byte offsets. + res := make([]Edit, len(diffs)) + lastEnd := 0 + utf8Len := 0 + for i, d := range diffs { + utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits + start := utf8Len + utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit + res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])} + lastEnd = d.End + } + return res +} + +// runes is like []rune(string(bytes)) without the duplicate allocation. +func runes(bytes []byte) []rune { + n := utf8.RuneCount(bytes) + runes := make([]rune, n) + for i := 0; i < n; i++ { + r, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + runes[i] = r + } + return runes +} + +// runesLen returns the length in bytes of the UTF-8 encoding of runes. +func runesLen(runes []rune) (len int) { + for _, r := range runes { + len += utf8.RuneLen(r) + } + return len +} + +// isASCII reports whether s contains only ASCII. +func isASCII[S string | []byte](s S) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/diff/unified.go b/vendor/golang.org/x/tools/internal/diff/unified.go new file mode 100644 index 00000000000..cfbda61020a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/unified.go @@ -0,0 +1,251 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "fmt" + "log" + "strings" +) + +// DefaultContextLines is the number of unchanged lines of surrounding +// context displayed by Unified. Use ToUnified to specify a different value. +const DefaultContextLines = 3 + +// Unified returns a unified diff of the old and new strings. +// The old and new labels are the names of the old and new files. +// If the strings are equal, it returns the empty string. +func Unified(oldLabel, newLabel, old, new string) string { + edits := Strings(old, new) + unified, err := ToUnified(oldLabel, newLabel, old, edits, DefaultContextLines) + if err != nil { + // Can't happen: edits are consistent. + log.Fatalf("internal error in diff.Unified: %v", err) + } + return unified +} + +// ToUnified applies the edits to content and returns a unified diff, +// with contextLines lines of (unchanged) context around each diff hunk. +// The old and new labels are the names of the content and result files. +// It returns an error if the edits are inconsistent; see ApplyEdits. +func ToUnified(oldLabel, newLabel, content string, edits []Edit, contextLines int) (string, error) { + u, err := toUnified(oldLabel, newLabel, content, edits, contextLines) + if err != nil { + return "", err + } + return u.String(), nil +} + +// unified represents a set of edits as a unified diff. +type unified struct { + // from is the name of the original file. + from string + // to is the name of the modified file. + to string + // hunks is the set of edit hunks needed to transform the file content. + hunks []*hunk +} + +// Hunk represents a contiguous set of line edits to apply. +type hunk struct { + // The line in the original source where the hunk starts. 
+ fromLine int
+ // The line in the modified source where the hunk starts.
+ toLine int
+ // The set of line based edits to apply.
+ lines []line
+}
+
+// Line represents a single line operation to apply as part of a Hunk.
+type line struct {
+ // kind is the type of line this represents, deletion, insertion or copy.
+ kind opKind
+ // content is the content of this line.
+ // For deletion it is the line being removed, for all others it is the line
+ // to put in the output.
+ content string
+}
+
+// opKind is used to denote the type of operation a line represents.
+type opKind int
+
+const (
+ // opDelete is the operation kind for a line that is present in the input
+ // but not in the output.
+ opDelete opKind = iota
+ // opInsert is the operation kind for a line that is new in the output.
+ opInsert
+ // opEqual is the operation kind for a line that is the same in the input and
+ // output, often used to provide context around edited lines.
+ opEqual
+)
+
+// String returns a human-readable representation of an opKind. It is not
+// intended for machine processing.
+func (k opKind) String() string {
+ switch k {
+ case opDelete:
+ return "delete"
+ case opInsert:
+ return "insert"
+ case opEqual:
+ return "equal"
+ default:
+ panic("unknown operation kind")
+ }
+}
+
+// toUnified takes the contents of a file and a sequence of edits, and
+// calculates a unified diff that represents those edits.
+func toUnified(fromName, toName string, content string, edits []Edit, contextLines int) (unified, error) {
+ gap := contextLines * 2
+ u := unified{
+ from: fromName,
+ to: toName,
+ }
+ if len(edits) == 0 {
+ return u, nil
+ }
+ var err error
+ edits, err = lineEdits(content, edits) // expand to whole lines
+ if err != nil {
+ return u, err
+ }
+ lines := splitLines(content)
+ var h *hunk
+ last := 0
+ toLine := 0
+ for _, edit := range edits {
+ // Compute the zero-based line numbers of the edit start and end.
+ // TODO(adonovan): opt: compute incrementally, avoid O(n^2).
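+ // (Editor's note: each strings.Count below rescans content from the
+ // beginning, which is the quadratic cost the TODO above refers to.)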
+ start := strings.Count(content[:edit.Start], "\n") + end := strings.Count(content[:edit.End], "\n") + if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' { + end++ // EOF counts as an implicit newline + } + + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + toLine += start - last + h = &hunk{ + fromLine: start + 1, + toLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-contextLines, start) + h.fromLine -= delta + h.toLine -= delta + } + last = start + for i := start; i < end; i++ { + h.lines = append(h.lines, line{kind: opDelete, content: lines[i]}) + last++ + } + if edit.New != "" { + for _, content := range splitLines(edit.New) { + h.lines = append(h.lines, line{kind: opInsert, content: content}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + return u, nil +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.lines = append(h.lines, line{kind: opEqual, content: lines[i]}) + delta++ + } + return delta +} + +// String converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. +func (u unified) String() string { + if len(u.hunks) == 0 { + return "" + } + b := new(strings.Builder) + fmt.Fprintf(b, "--- %s\n", u.from) + fmt.Fprintf(b, "+++ %s\n", u.to) + for _, hunk := range u.hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fromCount++ + case opInsert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(b, "@@") + if fromCount > 1 { + fmt.Fprintf(b, " -%d,%d", hunk.fromLine, fromCount) + } else if hunk.fromLine == 1 && fromCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " -0,0") + } else { + fmt.Fprintf(b, " -%d", hunk.fromLine) + } + if toCount > 1 { + fmt.Fprintf(b, " +%d,%d", hunk.toLine, toCount) + } else if hunk.toLine == 1 && toCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " +0,0") + } else { + fmt.Fprintf(b, " +%d", hunk.toLine) + } + fmt.Fprint(b, " @@\n") + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fmt.Fprintf(b, "-%s", l.content) + case opInsert: + fmt.Fprintf(b, "+%s", l.content) + default: + fmt.Fprintf(b, " %s", l.content) + } + if !strings.HasSuffix(l.content, "\n") { + fmt.Fprintf(b, "\n\\ No newline at end of file\n") + } + } + } + return b.String() +} diff --git a/vendor/golang.org/x/tools/internal/facts/facts.go b/vendor/golang.org/x/tools/internal/facts/facts.go new file mode 100644 index 00000000000..e1c18d373c3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/facts/facts.go @@ -0,0 +1,389 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facts defines a serializable set of analysis.Fact. +// +// It provides a partial implementation of the Fact-related parts of the +// analysis.Pass interface for use in analysis drivers such as "go vet" +// and other build systems. +// +// The serial format is unspecified and may change, so the same version +// of this package must be used for reading and writing serialized facts. +// +// The handling of facts in the analysis system parallels the handling +// of type information in the compiler: during compilation of package P, +// the compiler emits an export data file that describes the type of +// every object (named thing) defined in package P, plus every object +// indirectly reachable from one of those objects. Thus the downstream +// compiler of package Q need only load one export data file per direct +// import of Q, and it will learn everything about the API of package P +// and everything it needs to know about the API of P's dependencies. +// +// Similarly, analysis of package P emits a fact set containing facts +// about all objects exported from P, plus additional facts about only +// those objects of P's dependencies that are reachable from the API of +// package P; the downstream analysis of Q need only load one fact set +// per direct import of Q. +// +// The notion of "exportedness" that matters here is that of the +// compiler. According to the language spec, a method pkg.T.f is +// unexported simply because its name starts with lowercase. But the +// compiler must nonetheless export f so that downstream compilations can +// accurately ascertain whether pkg.T implements an interface pkg.I +// defined as interface{f()}. Exported thus means "described in export +// data". +package facts + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/types" + "io" + "log" + "reflect" + "sort" + "sync" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/types/objectpath" +) + +const debug = false + +// A Set is a set of analysis.Facts. +// +// Decode creates a Set of facts by reading from the imports of a given +// package, and Encode writes out the set. Between these operation, +// the Import and Export methods will query and update the set. +// +// All of Set's methods except String are safe to call concurrently. +type Set struct { + pkg *types.Package + mu sync.Mutex + m map[key]analysis.Fact +} + +type key struct { + pkg *types.Package + obj types.Object // (object facts only) + t reflect.Type +} + +// ImportObjectFact implements analysis.Pass.ImportObjectFact. +func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool { + if obj == nil { + panic("nil object") + } + key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)} + s.mu.Lock() + defer s.mu.Unlock() + if v, ok := s.m[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// ExportObjectFact implements analysis.Pass.ExportObjectFact. 
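+// The fact is keyed by (package, object, fact type); exporting another
+// fact with the same key overwrites the earlier one.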
+func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) { + if obj.Pkg() != s.pkg { + log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging another package", + s.pkg, obj, fact) + } + key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)} + s.mu.Lock() + s.m[key] = fact // clobber any existing entry + s.mu.Unlock() +} + +func (s *Set) AllObjectFacts(filter map[reflect.Type]bool) []analysis.ObjectFact { + var facts []analysis.ObjectFact + s.mu.Lock() + for k, v := range s.m { + if k.obj != nil && filter[k.t] { + facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: v}) + } + } + s.mu.Unlock() + return facts +} + +// ImportPackageFact implements analysis.Pass.ImportPackageFact. +func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := key{pkg: pkg, t: reflect.TypeOf(ptr)} + s.mu.Lock() + defer s.mu.Unlock() + if v, ok := s.m[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// ExportPackageFact implements analysis.Pass.ExportPackageFact. +func (s *Set) ExportPackageFact(fact analysis.Fact) { + key := key{pkg: s.pkg, t: reflect.TypeOf(fact)} + s.mu.Lock() + s.m[key] = fact // clobber any existing entry + s.mu.Unlock() +} + +func (s *Set) AllPackageFacts(filter map[reflect.Type]bool) []analysis.PackageFact { + var facts []analysis.PackageFact + s.mu.Lock() + for k, v := range s.m { + if k.obj == nil && filter[k.t] { + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: v}) + } + } + s.mu.Unlock() + return facts +} + +// gobFact is the Gob declaration of a serialized fact. +type gobFact struct { + PkgPath string // path of package + Object objectpath.Path // optional path of object relative to package itself + Fact analysis.Fact // type and value of user-defined Fact +} + +// A Decoder decodes the facts from the direct imports of the package +// provided to NewEncoder. A single decoder may be used to decode +// multiple fact sets (e.g. each for a different set of fact types) +// for the same package. Each call to Decode returns an independent +// fact set. +type Decoder struct { + pkg *types.Package + getPackage GetPackageFunc +} + +// NewDecoder returns a fact decoder for the specified package. +// +// It uses a brute-force recursive approach to enumerate all objects +// defined by dependencies of pkg, so that it can learn the set of +// package paths that may be mentioned in the fact encoding. This does +// not scale well; use [NewDecoderFunc] where possible. +func NewDecoder(pkg *types.Package) *Decoder { + // Compute the import map for this package. + // See the package doc comment. + m := importMap(pkg.Imports()) + getPackageFunc := func(path string) *types.Package { return m[path] } + return NewDecoderFunc(pkg, getPackageFunc) +} + +// NewDecoderFunc returns a fact decoder for the specified package. +// +// It calls the getPackage function for the package path string of +// each dependency (perhaps indirect) that it encounters in the +// encoding. If the function returns nil, the fact is discarded. +// +// This function is preferred over [NewDecoder] when the client is +// capable of efficient look-up of packages by package path. +func NewDecoderFunc(pkg *types.Package, getPackage GetPackageFunc) *Decoder { + return &Decoder{ + pkg: pkg, + getPackage: getPackage, + } +} + +// A GetPackageFunc function returns the package denoted by a package path. 
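+// If it returns nil for a path, facts mentioning that package are
+// discarded (see Decoder.Decode).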
+type GetPackageFunc = func(pkgPath string) *types.Package + +// Decode decodes all the facts relevant to the analysis of package +// pkgPath. The read function reads serialized fact data from an external +// source for one of pkg's direct imports, identified by package path. +// The empty file is a valid encoding of an empty fact set. +// +// It is the caller's responsibility to call gob.Register on all +// necessary fact types. +// +// Concurrent calls to Decode are safe, so long as the +// [GetPackageFunc] (if any) is also concurrency-safe. +func (d *Decoder) Decode(read func(pkgPath string) ([]byte, error)) (*Set, error) { + // Read facts from imported packages. + // Facts may describe indirectly imported packages, or their objects. + m := make(map[key]analysis.Fact) // one big bucket + for _, imp := range d.pkg.Imports() { + logf := func(format string, args ...interface{}) { + if debug { + prefix := fmt.Sprintf("in %s, importing %s: ", + d.pkg.Path(), imp.Path()) + log.Print(prefix, fmt.Sprintf(format, args...)) + } + } + + // Read the gob-encoded facts. + data, err := read(imp.Path()) + if err != nil { + return nil, fmt.Errorf("in %s, can't import facts for package %q: %v", + d.pkg.Path(), imp.Path(), err) + } + if len(data) == 0 { + continue // no facts + } + var gobFacts []gobFact + if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil { + return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err) + } + logf("decoded %d facts: %v", len(gobFacts), gobFacts) + + // Parse each one into a key and a Fact. + for _, f := range gobFacts { + factPkg := d.getPackage(f.PkgPath) // possibly an indirect dependency + if factPkg == nil { + // Fact relates to a dependency that was + // unused in this translation unit. Skip. + logf("no package %q; discarding %v", f.PkgPath, f.Fact) + continue + } + key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)} + if f.Object != "" { + // object fact + obj, err := objectpath.Object(factPkg, f.Object) + if err != nil { + // (most likely due to unexported object) + // TODO(adonovan): audit for other possibilities. + logf("no object for path: %v; discarding %s", err, f.Fact) + continue + } + key.obj = obj + logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj) + } else { + // package fact + logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg) + } + m[key] = f.Fact + } + } + + return &Set{pkg: d.pkg, m: m}, nil +} + +// Encode encodes a set of facts to a memory buffer. +// +// It may fail if one of the Facts could not be gob-encoded, but this is +// a sign of a bug in an Analyzer. +func (s *Set) Encode() []byte { + encoder := new(objectpath.Encoder) + + // TODO(adonovan): opt: use a more efficient encoding + // that avoids repeating PkgPath for each fact. + + // Gather all facts, including those from imported packages. + var gobFacts []gobFact + + s.mu.Lock() + for k, fact := range s.m { + if debug { + log.Printf("%v => %s\n", k, fact) + } + + // Don't export facts that we imported from another + // package, unless they represent fields or methods, + // or package-level types. + // (Facts about packages, and other package-level + // objects, are only obtained from direct imports so + // they needn't be reexported.) + // + // This is analogous to the pruning done by "deep" + // export data for types, but not as precise because + // we aren't careful about which structs or methods + // we rexport: it should be only those referenced + // from the API of s.pkg. + // TODO(adonovan): opt: be more precise. e.g. 
+		// intersect with the set of objects computed by
+		// importMap(s.pkg.Imports()).
+		// TODO(adonovan): opt: implement "shallow" facts.
+		if k.pkg != s.pkg {
+			if k.obj == nil {
+				continue // imported package fact
+			}
+			if _, isType := k.obj.(*types.TypeName); !isType &&
+				k.obj.Parent() == k.obj.Pkg().Scope() {
+				continue // imported fact about package-level non-type object
+			}
+		}
+
+		var object objectpath.Path
+		if k.obj != nil {
+			path, err := encoder.For(k.obj)
+			if err != nil {
+				if debug {
+					log.Printf("discarding fact %s about %s\n", fact, k.obj)
+				}
+				continue // object not accessible from package API; discard fact
+			}
+			object = path
+		}
+		gobFacts = append(gobFacts, gobFact{
+			PkgPath: k.pkg.Path(),
+			Object:  object,
+			Fact:    fact,
+		})
+	}
+	s.mu.Unlock()
+
+	// Sort facts by (package, object, type) for determinism.
+	sort.Slice(gobFacts, func(i, j int) bool {
+		x, y := gobFacts[i], gobFacts[j]
+		if x.PkgPath != y.PkgPath {
+			return x.PkgPath < y.PkgPath
+		}
+		if x.Object != y.Object {
+			return x.Object < y.Object
+		}
+		tx := reflect.TypeOf(x.Fact)
+		ty := reflect.TypeOf(y.Fact)
+		if tx != ty {
+			return tx.String() < ty.String()
+		}
+		return false // equal
+	})
+
+	var buf bytes.Buffer
+	if len(gobFacts) > 0 {
+		if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil {
+			// Fact encoding should never fail. Identify the culprit.
+			for _, gf := range gobFacts {
+				if err := gob.NewEncoder(io.Discard).Encode(gf); err != nil {
+					fact := gf.Fact
+					pkgpath := reflect.TypeOf(fact).Elem().PkgPath()
+					log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q",
+						fact, err, fact, pkgpath)
+				}
+			}
+		}
+	}
+
+	if debug {
+		log.Printf("package %q: encode %d facts, %d bytes\n",
+			s.pkg.Path(), len(gobFacts), buf.Len())
+	}
+
+	return buf.Bytes()
+}
+
+// String is provided only for debugging, and must not be called
+// concurrently with any Import/Export method.
+func (s *Set) String() string {
+	var buf bytes.Buffer
+	buf.WriteString("{")
+	for k, f := range s.m {
+		if buf.Len() > 1 {
+			buf.WriteString(", ")
+		}
+		if k.obj != nil {
+			buf.WriteString(k.obj.String())
+		} else {
+			buf.WriteString(k.pkg.Path())
+		}
+		fmt.Fprintf(&buf, ": %v", f)
+	}
+	buf.WriteString("}")
+	return buf.String()
+}
diff --git a/vendor/golang.org/x/tools/internal/facts/imports.go b/vendor/golang.org/x/tools/internal/facts/imports.go
new file mode 100644
index 00000000000..9f706cd954f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/facts/imports.go
@@ -0,0 +1,136 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package facts
+
+import (
+	"go/types"
+
+	"golang.org/x/tools/internal/aliases"
+)
+
+// importMap computes the import map for a package by traversing the
+// entire exported API of each of its imports.
+//
+// This is a workaround for the fact that we cannot access the map used
+// internally by the types.Importer returned by go/importer. The entries
+// in this map are the packages and objects that may be relevant to the
+// current analysis unit.
+//
+// Packages in the map that are only indirectly imported may be
+// incomplete (!pkg.Complete()).
+//
+// This function scales very poorly with packages' transitive object
+// references, which can be more than a million for each package near
+// the top of a large project. (This was a significant contributor to
+// #60621.)
+// TODO(adonovan): opt: compute this information more efficiently
+// by obtaining it from the internals of the gcexportdata decoder.
+func importMap(imports []*types.Package) map[string]*types.Package {
+	objects := make(map[types.Object]bool)
+	typs := make(map[types.Type]bool) // Named and TypeParam
+	packages := make(map[string]*types.Package)
+
+	var addObj func(obj types.Object)
+	var addType func(T types.Type)
+
+	addObj = func(obj types.Object) {
+		if !objects[obj] {
+			objects[obj] = true
+			addType(obj.Type())
+			if pkg := obj.Pkg(); pkg != nil {
+				packages[pkg.Path()] = pkg
+			}
+		}
+	}
+
+	addType = func(T types.Type) {
+		switch T := T.(type) {
+		case *aliases.Alias:
+			addType(aliases.Unalias(T))
+		case *types.Basic:
+			// nop
+		case *types.Named:
+			// Remove infinite expansions of *types.Named by always looking at the origin.
+			// Some named types with type parameters [that will not type check] have
+			// infinite expansions:
+			//	type N[T any] struct { F *N[N[T]] }
+			// importMap() is called on such types when Analyzer.RunDespiteErrors is true.
+			T = T.Origin()
+			if !typs[T] {
+				typs[T] = true
+				addObj(T.Obj())
+				addType(T.Underlying())
+				for i := 0; i < T.NumMethods(); i++ {
+					addObj(T.Method(i))
+				}
+				if tparams := T.TypeParams(); tparams != nil {
+					for i := 0; i < tparams.Len(); i++ {
+						addType(tparams.At(i))
+					}
+				}
+				if targs := T.TypeArgs(); targs != nil {
+					for i := 0; i < targs.Len(); i++ {
+						addType(targs.At(i))
+					}
+				}
+			}
+		case *types.Pointer:
+			addType(T.Elem())
+		case *types.Slice:
+			addType(T.Elem())
+		case *types.Array:
+			addType(T.Elem())
+		case *types.Chan:
+			addType(T.Elem())
+		case *types.Map:
+			addType(T.Key())
+			addType(T.Elem())
+		case *types.Signature:
+			addType(T.Params())
+			addType(T.Results())
+			if tparams := T.TypeParams(); tparams != nil {
+				for i := 0; i < tparams.Len(); i++ {
+					addType(tparams.At(i))
+				}
+			}
+		case *types.Struct:
+			for i := 0; i < T.NumFields(); i++ {
+				addObj(T.Field(i))
+			}
+		case *types.Tuple:
+			for i := 0; i < T.Len(); i++ {
+				addObj(T.At(i))
+			}
+		case *types.Interface:
+			for i := 0; i < T.NumMethods(); i++ {
+				addObj(T.Method(i))
+			}
+			for i := 0; i < T.NumEmbeddeds(); i++ {
+				addType(T.EmbeddedType(i)) // walk Embedded for implicits
+			}
+		case *types.Union:
+			for i := 0; i < T.Len(); i++ {
+				addType(T.Term(i).Type())
+			}
+		case *types.TypeParam:
+			if !typs[T] {
+				typs[T] = true
+				addObj(T.Obj())
+				addType(T.Constraint())
+			}
+		}
+	}
+
+	for _, imp := range imports {
+		packages[imp.Path()] = imp
+
+		scope := imp.Scope()
+		for _, name := range scope.Names() {
+			addObj(scope.Lookup(name))
+		}
+	}
+
+	return packages
+}
diff --git a/vendor/golang.org/x/tools/internal/goroot/importcfg.go b/vendor/golang.org/x/tools/internal/goroot/importcfg.go
new file mode 100644
index 00000000000..f1cd28e2ec3
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/goroot/importcfg.go
@@ -0,0 +1,71 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package goroot is a copy of package internal/goroot
+// in the main Go repo. It provides a utility to produce
+// an importcfg and an import-path-to-package-file map that
+// maps standard library packages to the locations of their
+// export data files.
package goroot
+
+import (
+	"bytes"
+	"fmt"
+	"os/exec"
+	"strings"
+	"sync"
+)
+
+// Importcfg returns an importcfg file to be passed to the
+// Go compiler that contains the cached paths for the .a files for the
+// standard library.
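+//
+// For example, the returned text has the following shape (paths are
+// illustrative only):
+//
+//	# import config
+//	packagefile fmt=/path/to/cache/fmt.a
+//	packagefile io=/path/to/cache/io.a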
+func Importcfg() (string, error) {
+	var icfg bytes.Buffer
+
+	m, err := PkgfileMap()
+	if err != nil {
+		return "", err
+	}
+	fmt.Fprintf(&icfg, "# import config")
+	for importPath, export := range m {
+		fmt.Fprintf(&icfg, "\npackagefile %s=%s", importPath, export)
+	}
+	s := icfg.String()
+	return s, nil
+}
+
+var (
+	stdlibPkgfileMap map[string]string
+	stdlibPkgfileErr error
+	once             sync.Once
+)
+
+// PkgfileMap returns a map of package paths to the location on disk
+// of the .a file for the package.
+// The caller must not modify the map.
+func PkgfileMap() (map[string]string, error) {
+	once.Do(func() {
+		m := make(map[string]string)
+		output, err := exec.Command("go", "list", "-export", "-e", "-f", "{{.ImportPath}} {{.Export}}", "std", "cmd").Output()
+		if err != nil {
+			stdlibPkgfileErr = err
+		}
+		for _, line := range strings.Split(string(output), "\n") {
+			if line == "" {
+				continue
+			}
+			sp := strings.SplitN(line, " ", 2)
+			if len(sp) != 2 {
+				// Record the failure in the package-level error so that
+				// callers of PkgfileMap actually see it.
+				stdlibPkgfileErr = fmt.Errorf("determining pkgfile map: invalid line in go list output: %q", line)
+				return
+			}
+			importPath, export := sp[0], sp[1]
+			if export != "" {
+				m[importPath] = export
+			}
+		}
+		stdlibPkgfileMap = m
+	})
+	return stdlibPkgfileMap, stdlibPkgfileErr
+}
diff --git a/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go b/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go
new file mode 100644
index 00000000000..949f2781619
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import "syscall"
+
+// The robustio package is copied from cmd/go/internal/robustio, a package used
+// by the go command to retry known flaky operations on certain operating systems.
+
+//go:generate go run copyfiles.go
+
+// Since the gopls module cannot access internal/syscall/windows, copy a
+// necessary constant.
+const ERROR_SHARING_VIOLATION syscall.Errno = 32
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio.go b/vendor/golang.org/x/tools/internal/robustio/robustio.go
new file mode 100644
index 00000000000..0a559fc9b80
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio.go
@@ -0,0 +1,69 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package robustio wraps I/O functions that are prone to failure on Windows,
+// transparently retrying errors up to an arbitrary timeout.
+//
+// Errors are classified heuristically and retries are bounded, so the functions
+// in this package do not completely eliminate spurious errors. However, they do
+// significantly reduce the rate of failure in practice.
+package robustio
+
+import "time"
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+	return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func ReadFile(filename string) ([]byte, error) { + return readFile(filename) +} + +// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur +// if an executable file in the directory has recently been executed. +// +// (See golang.org/issue/19491.) +func RemoveAll(path string) error { + return removeAll(path) +} + +// IsEphemeralError reports whether err is one of the errors that the functions +// in this package attempt to mitigate. +// +// Errors considered ephemeral include: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// +// This set may be expanded in the future; programs must not rely on the +// non-ephemerality of any given error. +func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} + +// A FileID uniquely identifies a file in the file system. +// +// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file +// names denote the same file. +// A FileID is comparable, and thus suitable for use as a map key. +type FileID struct { + device, inode uint64 +} + +// GetFileID returns the file system's identifier for the file, and its +// modification time. +// Like os.Stat, it reads through symbolic links. +func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) } diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go b/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go new file mode 100644 index 00000000000..99fd8ebc2ff --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == errFileNotFound + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go b/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go new file mode 100644 index 00000000000..d5c241857b4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || darwin +// +build windows darwin + +package robustio + +import ( + "errors" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 2000 * time.Millisecond + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. 
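+//
+// The calling convention, sketched with a hypothetical operation (the
+// helpers below wrap os.Rename, os.ReadFile and os.RemoveAll this way):
+//
+//	err := retry(func() (err error, mayRetry bool) {
+//		err = os.Chmod(path, 0o644)
+//		return err, isEphemeralError(err)
+//	})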
+func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like os.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = os.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. + return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_other.go b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go new file mode 100644 index 00000000000..3a20cac6cf8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !darwin +// +build !windows,!darwin + +package robustio + +import ( + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return os.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go b/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go new file mode 100644 index 00000000000..9fa4cacb5a3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build plan9 +// +build plan9 + +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + dir := fi.Sys().(*syscall.Dir) + return FileID{ + device: uint64(dir.Type)<<32 | uint64(dir.Dev), + inode: dir.Qid.Path, + }, fi.ModTime(), nil +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go b/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go new file mode 100644 index 00000000000..cf74865d0b5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !plan9 +// +build !windows,!plan9 + +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + stat := fi.Sys().(*syscall.Stat_t) + return FileID{ + device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux) + inode: stat.Ino, + }, fi.ModTime(), nil +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go b/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go new file mode 100644 index 00000000000..616c32883d6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go @@ -0,0 +1,51 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" + "time" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + ERROR_SHARING_VIOLATION: + return true + } + } + return false +} + +// Note: it may be convenient to have this helper return fs.FileInfo, but +// implementing this is actually quite involved on Windows. Since we only +// currently use mtime, keep it simple. +func getFileID(filename string) (FileID, time.Time, error) { + filename16, err := syscall.UTF16PtrFromString(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0) + if err != nil { + return FileID{}, time.Time{}, err + } + defer syscall.CloseHandle(h) + var i syscall.ByHandleFileInformation + if err := syscall.GetFileInformationByHandle(h, &i); err != nil { + return FileID{}, time.Time{}, err + } + mtime := time.Unix(0, i.LastWriteTime.Nanoseconds()) + return FileID{ + device: uint64(i.VolumeSerialNumber), + inode: uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow), + }, mtime, nil +} diff --git a/vendor/golang.org/x/tools/internal/testenv/exec.go b/vendor/golang.org/x/tools/internal/testenv/exec.go new file mode 100644 index 00000000000..f2ab5f5eb8d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/testenv/exec.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testenv + +import ( + "context" + "flag" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "sync" + "testing" + "time" +) + +// HasExec reports whether the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +func HasExec() bool { + switch runtime.GOOS { + case "aix", + "android", + "darwin", + "dragonfly", + "freebsd", + "illumos", + "linux", + "netbsd", + "openbsd", + "plan9", + "solaris", + "windows": + // Known OS that isn't ios or wasm; assume that exec works. + return true + + case "ios", "js", "wasip1": + // ios has an exec syscall but on real iOS devices it might return a + // permission error. In an emulated environment (such as a Corellium host) + // it might succeed, so try it and find out. + // + // As of 2023-04-19 wasip1 and js don't have exec syscalls at all, but we + // may as well use the same path so that this branch can be tested without + // an ios environment. + fallthrough + + default: + tryExecOnce.Do(func() { + exe, err := os.Executable() + if err != nil { + return + } + if flag.Lookup("test.list") == nil { + // We found the executable, but we don't know how to run it in a way + // that should succeed without side-effects. Just forget it. + return + } + // We know that a test executable exists and can run, because we're + // running it now. Use it to check for overall exec support, but be sure + // to remove any environment variables that might trigger non-default + // behavior in a custom TestMain. + cmd := exec.Command(exe, "-test.list=^$") + cmd.Env = []string{} + if err := cmd.Run(); err == nil { + tryExecOk = true + } + }) + return tryExecOk + } +} + +var ( + tryExecOnce sync.Once + tryExecOk bool +) + +// NeedsExec checks that the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +// If not, NeedsExec calls t.Skip with an explanation. +func NeedsExec(t testing.TB) { + if !HasExec() { + t.Skipf("skipping test: cannot exec subprocess on %s/%s", runtime.GOOS, runtime.GOARCH) + } +} + +// CommandContext is like exec.CommandContext, but: +// - skips t if the platform does not support os/exec, +// - if supported, sends SIGQUIT instead of SIGKILL in its Cancel function +// - if the test has a deadline, adds a Context timeout and (if supported) WaitDelay +// for an arbitrary grace period before the test's deadline expires, +// - if Cmd has the Cancel field, fails the test if the command is canceled +// due to the test's deadline, and +// - sets a Cleanup function that verifies that the test did not leak a subprocess. +func CommandContext(t testing.TB, ctx context.Context, name string, args ...string) *exec.Cmd { + t.Helper() + NeedsExec(t) + + var ( + cancelCtx context.CancelFunc + gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging) + ) + + if td, ok := Deadline(t); ok { + // Start with a minimum grace period, just long enough to consume the + // output of a reasonable program after it terminates. + gracePeriod = 100 * time.Millisecond + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { + scale, err := strconv.Atoi(s) + if err != nil { + t.Fatalf("invalid GO_TEST_TIMEOUT_SCALE: %v", err) + } + gracePeriod *= time.Duration(scale) + } + + // If time allows, increase the termination grace period to 5% of the + // test's remaining time. 
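+		// (For example, a test with ten minutes left before its deadline
+		// gets a grace period of 30 seconds rather than the 100ms minimum.)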
+		testTimeout := time.Until(td)
+		if gp := testTimeout / 20; gp > gracePeriod {
+			gracePeriod = gp
+		}
+
+		// When we run commands that execute subprocesses, we want to reserve two
+		// grace periods to clean up: one for the delay between the first
+		// termination signal being sent (via the Cancel callback when the Context
+		// expires) and the process being forcibly terminated (via the WaitDelay
+		// field), and a second one for the delay between the process being
+		// terminated and the test logging its output for debugging.
+		//
+		// (We want to ensure that the test process itself has enough time to
+		// log the output before it is also terminated.)
+		cmdTimeout := testTimeout - 2*gracePeriod
+
+		if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > cmdTimeout {
+			// Either ctx doesn't have a deadline, or its deadline would expire
+			// after (or too close before) the test has already timed out.
+			// Add a shorter timeout so that the test will produce useful output.
+			ctx, cancelCtx = context.WithTimeout(ctx, cmdTimeout)
+		}
+	}
+
+	cmd := exec.CommandContext(ctx, name, args...)
+
+	// Use reflection to set the Cancel and WaitDelay fields, if present.
+	// TODO(bcmills): When we no longer support Go versions below 1.20,
+	// remove the use of reflect and assume that the fields are always present.
+	rc := reflect.ValueOf(cmd).Elem()
+
+	if rCancel := rc.FieldByName("Cancel"); rCancel.IsValid() {
+		rCancel.Set(reflect.ValueOf(func() error {
+			if cancelCtx != nil && ctx.Err() == context.DeadlineExceeded {
+				// The command timed out due to running too close to the test's deadline
+				// (because we specifically set a shorter Context deadline for that
+				// purpose above). There is no way the test did that intentionally
+				// (it's too close to the wire!), so mark it as a test failure. That way,
+				// if the test expects the command to fail for some other reason, it doesn't
+				// have to distinguish between that reason and a timeout.
+				t.Errorf("test timed out while running command: %v", cmd)
+			} else {
+				// The command is being terminated due to ctx being canceled, but
+				// apparently not due to an explicit test deadline that we added.
+				// Log that information in case it is useful for diagnosing a failure,
+				// but don't actually fail the test because of it.
+				t.Logf("%v: terminating command: %v", ctx.Err(), cmd)
+			}
+			return cmd.Process.Signal(Sigquit)
+		}))
+	}
+
+	if rWaitDelay := rc.FieldByName("WaitDelay"); rWaitDelay.IsValid() {
+		rWaitDelay.Set(reflect.ValueOf(gracePeriod))
+	}
+
+	t.Cleanup(func() {
+		if cancelCtx != nil {
+			cancelCtx()
+		}
+		if cmd.Process != nil && cmd.ProcessState == nil {
+			t.Errorf("command was started, but test did not wait for it to complete: %v", cmd)
+		}
+	})
+
+	return cmd
+}
+
+// Command is like exec.Command, but applies the same changes as
+// testenv.CommandContext (with a default Context).
+func Command(t testing.TB, name string, args ...string) *exec.Cmd {
+	t.Helper()
+	return CommandContext(t, context.Background(), name, args...)
+}
diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv.go b/vendor/golang.org/x/tools/internal/testenv/testenv.go
new file mode 100644
index 00000000000..d4a17ce039a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/testenv/testenv.go
@@ -0,0 +1,492 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testenv contains helper functions for skipping tests
+// based on which tools are present in the environment.
+package testenv
+
+import (
+	"bytes"
+	"fmt"
+	"go/build"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"runtime/debug"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"golang.org/x/mod/modfile"
+	"golang.org/x/tools/internal/goroot"
+)
+
+// packageMainIsDevel reports whether the module containing package main
+// is a development version (if module information is available).
+func packageMainIsDevel() bool {
+	info, ok := debug.ReadBuildInfo()
+	if !ok {
+		// Most test binaries currently lack build info, but this should become more
+		// permissive once https://golang.org/issue/33976 is fixed.
+		return true
+	}
+
+	// Note: info.Main.Version describes the version of the module containing
+	// package main, not the version of "the main module".
+	// See https://golang.org/issue/33975.
+	return info.Main.Version == "(devel)"
+}
+
+var checkGoBuild struct {
+	once sync.Once
+	err  error
+}
+
+// HasTool reports an error if the required tool is not available in PATH.
+//
+// For certain tools, it checks that the tool executable is correct.
+func HasTool(tool string) error {
+	if tool == "cgo" {
+		enabled, err := cgoEnabled(false)
+		if err != nil {
+			return fmt.Errorf("checking cgo: %v", err)
+		}
+		if !enabled {
+			return fmt.Errorf("cgo not enabled")
+		}
+		return nil
+	}
+
+	_, err := exec.LookPath(tool)
+	if err != nil {
+		return err
+	}
+
+	switch tool {
+	case "patch":
+		// check that the patch tool supports the -o argument
+		temp, err := os.CreateTemp("", "patch-test")
+		if err != nil {
+			return err
+		}
+		temp.Close()
+		defer os.Remove(temp.Name())
+		cmd := exec.Command(tool, "-o", temp.Name())
+		if err := cmd.Run(); err != nil {
+			return err
+		}
+
+	case "go":
+		checkGoBuild.once.Do(func() {
+			if runtime.GOROOT() != "" {
+				// Ensure that the 'go' command found by exec.LookPath is from the correct
+				// GOROOT. Otherwise, 'some/path/go test ./...' will test against some
+				// version of the 'go' binary other than 'some/path/go', which is almost
+				// certainly not what the user intended.
+				out, err := exec.Command(tool, "env", "GOROOT").Output()
+				if err != nil {
+					if exit, ok := err.(*exec.ExitError); ok && len(exit.Stderr) > 0 {
+						err = fmt.Errorf("%w\nstderr:\n%s", err, exit.Stderr)
+					}
+					checkGoBuild.err = err
+					return
+				}
+				GOROOT := strings.TrimSpace(string(out))
+				if GOROOT != runtime.GOROOT() {
+					checkGoBuild.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT())
+					return
+				}
+			}
+
+			dir, err := os.MkdirTemp("", "testenv-*")
+			if err != nil {
+				checkGoBuild.err = err
+				return
+			}
+			defer os.RemoveAll(dir)
+
+			mainGo := filepath.Join(dir, "main.go")
+			if err := os.WriteFile(mainGo, []byte("package main\nfunc main() {}\n"), 0644); err != nil {
+				checkGoBuild.err = err
+				return
+			}
+			cmd := exec.Command("go", "build", "-o", os.DevNull, mainGo)
+			cmd.Dir = dir
+			if out, err := cmd.CombinedOutput(); err != nil {
+				if len(out) > 0 {
+					checkGoBuild.err = fmt.Errorf("%v: %v\n%s", cmd, err, out)
+				} else {
+					checkGoBuild.err = fmt.Errorf("%v: %v", cmd, err)
+				}
+			}
+		})
+		if checkGoBuild.err != nil {
+			return checkGoBuild.err
+		}
+
+	case "diff":
+		// Check that diff is the GNU version, needed for the -u argument and
+		// to report missing newlines at the end of files.
+ out, err := exec.Command(tool, "-version").Output() + if err != nil { + return err + } + if !bytes.Contains(out, []byte("GNU diffutils")) { + return fmt.Errorf("diff is not the GNU version") + } + } + + return nil +} + +func cgoEnabled(bypassEnvironment bool) (bool, error) { + cmd := exec.Command("go", "env", "CGO_ENABLED") + if bypassEnvironment { + cmd.Env = append(append([]string(nil), os.Environ()...), "CGO_ENABLED=") + } + out, err := cmd.Output() + if err != nil { + if exit, ok := err.(*exec.ExitError); ok && len(exit.Stderr) > 0 { + err = fmt.Errorf("%w\nstderr:\n%s", err, exit.Stderr) + } + return false, err + } + enabled := strings.TrimSpace(string(out)) + return enabled == "1", nil +} + +func allowMissingTool(tool string) bool { + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "plan9", "solaris", "windows": + // Known non-mobile OS. Expect a reasonably complete environment. + default: + return true + } + + switch tool { + case "cgo": + if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-nocgo") { + // Explicitly disabled on -nocgo builders. + return true + } + if enabled, err := cgoEnabled(true); err == nil && !enabled { + // No platform support. + return true + } + case "go": + if os.Getenv("GO_BUILDER_NAME") == "illumos-amd64-joyent" { + // Work around a misconfigured builder (see https://golang.org/issue/33950). + return true + } + case "diff": + if os.Getenv("GO_BUILDER_NAME") != "" { + return true + } + case "patch": + if os.Getenv("GO_BUILDER_NAME") != "" { + return true + } + } + + // If a developer is actively working on this test, we expect them to have all + // of its dependencies installed. However, if it's just a dependency of some + // other module (for example, being run via 'go test all'), we should be more + // tolerant of unusual environments. + return !packageMainIsDevel() +} + +// NeedsTool skips t if the named tool is not present in the path. +// As a special case, "cgo" means "go" is present and can compile cgo programs. +func NeedsTool(t testing.TB, tool string) { + err := HasTool(tool) + if err == nil { + return + } + + t.Helper() + if allowMissingTool(tool) { + // TODO(adonovan): if we skip because of (e.g.) + // mismatched go env GOROOT and runtime.GOROOT, don't + // we risk some users not getting the coverage they expect? + // bcmills notes: this shouldn't be a concern as of CL 404134 (Go 1.19). + // We could probably safely get rid of that GOPATH consistency + // check entirely at this point. + t.Skipf("skipping because %s tool not available: %v", tool, err) + } else { + t.Fatalf("%s tool not available: %v", tool, err) + } +} + +// NeedsGoPackages skips t if the go/packages driver (or 'go' tool) implied by +// the current process environment is not present in the path. +func NeedsGoPackages(t testing.TB) { + t.Helper() + + tool := os.Getenv("GOPACKAGESDRIVER") + switch tool { + case "off": + // "off" forces go/packages to use the go command. + tool = "go" + case "": + if _, err := exec.LookPath("gopackagesdriver"); err == nil { + tool = "gopackagesdriver" + } else { + tool = "go" + } + } + + NeedsTool(t, tool) +} + +// NeedsGoPackagesEnv skips t if the go/packages driver (or 'go' tool) implied +// by env is not present in the path. 
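+//
+// A hypothetical call site, for illustration:
+//
+//	env := append(os.Environ(), "GOPACKAGESDRIVER=off")
+//	testenv.NeedsGoPackagesEnv(t, env) // "off" requires only the plain 'go' tool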
+func NeedsGoPackagesEnv(t testing.TB, env []string) {
+	t.Helper()
+
+	for _, v := range env {
+		if strings.HasPrefix(v, "GOPACKAGESDRIVER=") {
+			tool := strings.TrimPrefix(v, "GOPACKAGESDRIVER=")
+			if tool == "off" {
+				NeedsTool(t, "go")
+			} else {
+				NeedsTool(t, tool)
+			}
+			return
+		}
+	}
+
+	NeedsGoPackages(t)
+}
+
+// NeedsGoBuild skips t if the current system can't build programs with "go build"
+// and then run them with os.StartProcess or exec.Command.
+// Android doesn't have the userspace that go build needs to run,
+// and js/wasm doesn't support running subprocesses.
+func NeedsGoBuild(t testing.TB) {
+	t.Helper()
+
+	// This logic was derived from internal/testing.HasGoBuild and
+	// may need to be updated as that function evolves.
+
+	NeedsTool(t, "go")
+}
+
+// ExitIfSmallMachine emits a helpful diagnostic and calls os.Exit(0) if the
+// current machine is a builder known to have scarce resources.
+//
+// It should be called from within a TestMain function.
+func ExitIfSmallMachine() {
+	switch b := os.Getenv("GO_BUILDER_NAME"); b {
+	case "linux-arm-scaleway":
+		// "linux-arm" was renamed to "linux-arm-scaleway" in CL 303230.
+		fmt.Fprintln(os.Stderr, "skipping test: linux-arm-scaleway builder lacks sufficient memory (https://golang.org/issue/32834)")
+	case "plan9-arm":
+		fmt.Fprintln(os.Stderr, "skipping test: plan9-arm builder lacks sufficient memory (https://golang.org/issue/38772)")
+	case "netbsd-arm-bsiegert", "netbsd-arm64-bsiegert":
+		// As of 2021-06-02, these builders are running with GO_TEST_TIMEOUT_SCALE=10,
+		// and there is only one of each. We shouldn't waste those scarce resources
+		// running very slow tests.
+		fmt.Fprintf(os.Stderr, "skipping test: %s builder is very slow\n", b)
+	case "dragonfly-amd64":
+		// As of 2021-11-02, this builder is running with GO_TEST_TIMEOUT_SCALE=2,
+		// and seems to have unusually slow disk performance.
+		fmt.Fprintln(os.Stderr, "skipping test: dragonfly-amd64 has slow disk (https://golang.org/issue/45216)")
+	case "linux-riscv64-unmatched":
+		// As of 2021-11-03, this builder is empirically not fast enough to run
+		// gopls tests. Ideally we should make the tests faster in short mode
+		// and/or fix them to not assume arbitrary deadlines.
+		// For now, we'll skip them instead.
+		fmt.Fprintf(os.Stderr, "skipping test: %s builder is too slow (https://golang.org/issue/49321)\n", b)
+	default:
+		switch runtime.GOOS {
+		case "android", "ios":
+			fmt.Fprintf(os.Stderr, "skipping test: assuming that %s is resource-constrained\n", runtime.GOOS)
+		default:
+			return
+		}
+	}
+	os.Exit(0)
+}
+
+// Go1Point returns the x in Go 1.x.
+func Go1Point() int {
+	for i := len(build.Default.ReleaseTags) - 1; i >= 0; i-- {
+		var version int
+		if _, err := fmt.Sscanf(build.Default.ReleaseTags[i], "go1.%d", &version); err != nil {
+			continue
+		}
+		return version
+	}
+	panic("bad release tags")
+}
+
+// NeedsGo1Point skips t if the Go version used to run the test is older than
+// 1.x.
+func NeedsGo1Point(t testing.TB, x int) {
+	if Go1Point() < x {
+		t.Helper()
+		t.Skipf("running Go version %q is version 1.%d, older than required 1.%d", runtime.Version(), Go1Point(), x)
+	}
+}
+
+// SkipAfterGo1Point skips t if the Go version used to run the test is newer than
+// 1.x.
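+//
+// Paired with NeedsGo1Point this brackets a version range; for example, a
+// hypothetical test that only makes sense on exactly Go 1.20 could use:
+//
+//	testenv.NeedsGo1Point(t, 20)
+//	testenv.SkipAfterGo1Point(t, 20)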
+func SkipAfterGo1Point(t testing.TB, x int) {
+	if Go1Point() > x {
+		t.Helper()
+		t.Skipf("running Go version %q is version 1.%d, newer than maximum 1.%d", runtime.Version(), Go1Point(), x)
+	}
+}
+
+// NeedsLocalhostNet skips t if networking does not work for ports opened
+// with "localhost".
+func NeedsLocalhostNet(t testing.TB) {
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		t.Skipf(`Listening on "localhost" fails on %s; see https://go.dev/issue/59718`, runtime.GOOS)
+	}
+}
+
+// Deadline returns the deadline of t, if known,
+// using the Deadline method added in Go 1.15.
+func Deadline(t testing.TB) (time.Time, bool) {
+	td, ok := t.(interface {
+		Deadline() (time.Time, bool)
+	})
+	if !ok {
+		return time.Time{}, false
+	}
+	return td.Deadline()
+}
+
+// WriteImportcfg writes an importcfg file used by the compiler or linker to
+// dstPath containing entries for the packages in std and cmd in addition
+// to the package-to-package-file mappings in additionalPackageFiles.
+func WriteImportcfg(t testing.TB, dstPath string, additionalPackageFiles map[string]string) {
+	importcfg, err := goroot.Importcfg()
+	if err != nil {
+		t.Fatalf("preparing the importcfg failed: %s", err)
+	}
+	for k, v := range additionalPackageFiles {
+		importcfg += fmt.Sprintf("\npackagefile %s=%s", k, v)
+	}
+	if err := os.WriteFile(dstPath, []byte(importcfg), 0644); err != nil {
+		t.Fatalf("writing the importcfg failed: %s", err)
+	}
+}
+
+var (
+	gorootOnce sync.Once
+	gorootPath string
+	gorootErr  error
+)
+
+func findGOROOT() (string, error) {
+	gorootOnce.Do(func() {
+		gorootPath = runtime.GOROOT()
+		if gorootPath != "" {
+			// If runtime.GOROOT() is non-empty, assume that it is valid. (It might
+			// not be: for example, the user may have explicitly set GOROOT
+			// to the wrong directory.)
+			return
+		}
+
+		cmd := exec.Command("go", "env", "GOROOT")
+		out, err := cmd.Output()
+		if err != nil {
+			gorootErr = fmt.Errorf("%v: %v", cmd, err)
+		}
+		gorootPath = strings.TrimSpace(string(out))
+	})
+
+	return gorootPath, gorootErr
+}
+
+// GOROOT reports the path to the directory containing the root of the Go
+// project source tree. This is normally equivalent to runtime.GOROOT, but
+// works even if the test binary was built with -trimpath.
+//
+// If GOROOT cannot be found, GOROOT skips t if t is non-nil,
+// or panics otherwise.
+func GOROOT(t testing.TB) string {
+	path, err := findGOROOT()
+	if err != nil {
+		if t == nil {
+			panic(err)
+		}
+		t.Helper()
+		t.Skip(err)
+	}
+	return path
+}
+
+// NeedsLocalXTools skips t if the golang.org/x/tools module is replaced and
+// its replacement directory does not exist (or does not contain the module).
+func NeedsLocalXTools(t testing.TB) {
+	t.Helper()
+
+	NeedsTool(t, "go")
+
+	cmd := Command(t, "go", "list", "-f", "{{with .Replace}}{{.Dir}}{{end}}", "-m", "golang.org/x/tools")
+	out, err := cmd.Output()
+	if err != nil {
+		if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+			t.Skipf("skipping test: %v: %v\n%s", cmd, err, ee.Stderr)
+		}
+		t.Skipf("skipping test: %v: %v", cmd, err)
+	}
+
+	dir := string(bytes.TrimSpace(out))
+	if dir == "" {
+		// No replacement directory, and (since we didn't set -e) no error either.
+		// Maybe x/tools isn't replaced at all (as in a gopls release, or when
+		// using a go.work file that includes the x/tools module).
+		return
+	}
+
+	// We found the directory where x/tools would exist if we're in a clone of the
+	// repo. Is it there? (If not, we're probably in the module cache instead.)
+ modFilePath := filepath.Join(dir, "go.mod") + b, err := os.ReadFile(modFilePath) + if err != nil { + t.Skipf("skipping test: x/tools replacement not found: %v", err) + } + modulePath := modfile.ModulePath(b) + + if want := "golang.org/x/tools"; modulePath != want { + t.Skipf("skipping test: %s module path is %q, not %q", modFilePath, modulePath, want) + } +} + +// NeedsGoExperiment skips t if the current process environment does not +// have a GOEXPERIMENT flag set. +func NeedsGoExperiment(t testing.TB, flag string) { + t.Helper() + + goexp := os.Getenv("GOEXPERIMENT") + set := false + for _, f := range strings.Split(goexp, ",") { + if f == "" { + continue + } + if f == "none" { + // GOEXPERIMENT=none disables all experiment flags. + set = false + break + } + val := true + if strings.HasPrefix(f, "no") { + f, val = f[2:], false + } + if f == flag { + set = val + } + } + if !set { + t.Skipf("skipping test: flag %q is not set in GOEXPERIMENT=%q", flag, goexp) + } +} diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go b/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go new file mode 100644 index 00000000000..e9ce0d3649d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) +// +build !unix,!aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package testenv + +import "os" + +// Sigquit is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var Sigquit = os.Kill diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go b/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go new file mode 100644 index 00000000000..bc6af1ff81d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package testenv + +import "syscall" + +// Sigquit is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var Sigquit = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/txtar/archive.go b/vendor/golang.org/x/tools/txtar/archive.go new file mode 100644 index 00000000000..fd95f1e64a1 --- /dev/null +++ b/vendor/golang.org/x/tools/txtar/archive.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package txtar implements a trivial text-based file archive format. +// +// The goals for the format are: +// +// - be trivial enough to create and edit by hand. +// - be able to store trees of text files describing go command test cases. +// - diff nicely in git history and code reviews. +// +// Non-goals include being a completely general archive format, +// storing binary data, storing file modes, storing special files like +// symbolic links, and so on. 
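+//
+// For example, an archive holding a comment and two files looks like
+// this (the exact rules follow in the next section):
+//
+//	This is a comment.
+//	-- hello.txt --
+//	Hello, world.
+//	-- sub/hi.txt --
+//	hi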
+//
+// # Txtar format
+//
+// A txtar archive is zero or more comment lines and then a sequence of file entries.
+// Each file entry begins with a file marker line of the form "-- FILENAME --"
+// and is followed by zero or more file content lines making up the file data.
+// The comment or file content ends at the next file marker line.
+// The file marker line must begin with the three-byte sequence "-- "
+// and end with the three-byte sequence " --", but the enclosed
+// file name can be surrounded by additional white space,
+// all of which is stripped.
+//
+// If the txtar file is missing a trailing newline on the final line,
+// parsers should consider a final newline to be present anyway.
+//
+// There are no possible syntax errors in a txtar archive.
+package txtar
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+)
+
+// An Archive is a collection of files.
+type Archive struct {
+	Comment []byte
+	Files   []File
+}
+
+// A File is a single file in an archive.
+type File struct {
+	Name string // name of file ("foo/bar.txt")
+	Data []byte // text content of file
+}
+
+// Format returns the serialized form of an Archive.
+// It is assumed that the Archive data structure is well-formed:
+// a.Comment and all a.File[i].Data contain no file marker lines,
+// and each a.File[i].Name is non-empty.
+func Format(a *Archive) []byte {
+	var buf bytes.Buffer
+	buf.Write(fixNL(a.Comment))
+	for _, f := range a.Files {
+		fmt.Fprintf(&buf, "-- %s --\n", f.Name)
+		buf.Write(fixNL(f.Data))
+	}
+	return buf.Bytes()
+}
+
+// ParseFile parses the named file as an archive.
+func ParseFile(file string) (*Archive, error) {
+	data, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	return Parse(data), nil
+}
+
+// Parse parses the serialized form of an Archive.
+// The returned Archive holds slices of data.
+func Parse(data []byte) *Archive {
+	a := new(Archive)
+	var name string
+	a.Comment, name, data = findFileMarker(data)
+	for name != "" {
+		f := File{name, nil}
+		f.Data, name, data = findFileMarker(data)
+		a.Files = append(a.Files, f)
+	}
+	return a
+}
+
+var (
+	newlineMarker = []byte("\n-- ")
+	marker        = []byte("-- ")
+	markerEnd     = []byte(" --")
+)
+
+// findFileMarker finds the next file marker in data,
+// extracts the file name, and returns the data before the marker,
+// the file name, and the data after the marker.
+// If there is no next marker, findFileMarker returns before = fixNL(data), name = "", after = nil.
+func findFileMarker(data []byte) (before []byte, name string, after []byte) {
+	var i int
+	for {
+		if name, after = isMarker(data[i:]); name != "" {
+			return data[:i], name, after
+		}
+		j := bytes.Index(data[i:], newlineMarker)
+		if j < 0 {
+			return fixNL(data), "", nil
+		}
+		i += j + 1 // positioned at start of new possible marker
+	}
+}
+
+// isMarker checks whether data begins with a file marker line.
+// If so, it returns the name from the line and the data after the line.
+// Otherwise it returns name == "" with an unspecified after.
+func isMarker(data []byte) (name string, after []byte) {
+	if !bytes.HasPrefix(data, marker) {
+		return "", nil
+	}
+	if i := bytes.IndexByte(data, '\n'); i >= 0 {
+		data, after = data[:i], data[i+1:]
+	}
+	if !(bytes.HasSuffix(data, markerEnd) && len(data) >= len(marker)+len(markerEnd)) {
+		return "", nil
+	}
+	return strings.TrimSpace(string(data[len(marker) : len(data)-len(markerEnd)])), after
+}
+
+// If data is empty or ends in \n, fixNL returns data.
+// Otherwise fixNL returns a new slice consisting of data with a final \n added.
+func fixNL(data []byte) []byte {
+	if len(data) == 0 || data[len(data)-1] == '\n' {
+		return data
+	}
+	d := make([]byte, len(data)+1)
+	copy(d, data)
+	d[len(data)] = '\n'
+	return d
+}
diff --git a/vendor/golang.org/x/tools/txtar/fs.go b/vendor/golang.org/x/tools/txtar/fs.go
new file mode 100644
index 00000000000..e37397e7b71
--- /dev/null
+++ b/vendor/golang.org/x/tools/txtar/fs.go
@@ -0,0 +1,259 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package txtar
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"path"
+	"time"
+)
+
+// FS returns the file system form of an Archive.
+// It returns an error if any of the file names in the archive
+// are not valid file system names.
+// The archive must not be modified while the FS is in use.
+//
+// If the file system detects that it has been modified, calls to the
+// file system return an ErrModified error.
+func FS(a *Archive) (fs.FS, error) {
+	// Create a filesystem with a root directory.
+	root := &node{fileinfo: fileinfo{path: ".", mode: readOnlyDir}}
+	fsys := &filesystem{a, map[string]*node{root.path: root}}
+
+	if err := initFiles(fsys); err != nil {
+		return nil, fmt.Errorf("cannot create fs.FS from txtar.Archive: %s", err)
+	}
+	return fsys, nil
+}
+
+const (
+	readOnly    fs.FileMode = 0o444 // read only mode
+	readOnlyDir             = readOnly | fs.ModeDir
+)
+
+// ErrModified indicates that the file system returned by FS
+// noticed that the underlying archive has been modified
+// since the call to FS. Detection of modification is best effort,
+// to help diagnose misuse of the API, and is not guaranteed.
+var ErrModified error = errors.New("txtar.Archive has been modified during txtar.FS")
+
+// A filesystem is a simple in-memory file system for txtar archives,
+// represented as a map from valid path names to information about the
+// files or directories they represent.
+//
+// File system operations are read only. Modifications to the underlying
+// *Archive may race. To help prevent this, the filesystem tries
+// to detect modification during Open and return ErrModified if it
+// is able to detect a modification.
+type filesystem struct {
+	ar    *Archive
+	nodes map[string]*node
+}
+
+// node is a file or directory in the tree of a filesystem.
+type node struct {
+	fileinfo               // fs.FileInfo and fs.DirEntry implementation
+	idx      int           // index into ar.Files (for files)
+	entries  []fs.DirEntry // subdirectories and files (for directories)
+}
+
+var _ fs.FS = (*filesystem)(nil)
+var _ fs.DirEntry = (*node)(nil)
+
+// initFiles initializes fsys from fsys.ar.Files. Returns an error if there are any
+// invalid file names or collisions between files or directories.
+func initFiles(fsys *filesystem) error {
+	for idx, file := range fsys.ar.Files {
+		name := file.Name
+		if !fs.ValidPath(name) {
+			return fmt.Errorf("file %q is an invalid path", name)
+		}
+
+		n := &node{idx: idx, fileinfo: fileinfo{path: name, size: len(file.Data), mode: readOnly}}
+		if err := insert(fsys, n); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// insert adds node n as an entry to its parent directory within the filesystem.
+func insert(fsys *filesystem, n *node) error {
+	if m := fsys.nodes[n.path]; m != nil {
+		return fmt.Errorf("duplicate path %q", n.path)
+	}
+	fsys.nodes[n.path] = n
+
+	// fsys.nodes contains "." to prevent infinite loops.
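+	// (path.Dir eventually yields ".", which FS registered up front as the
+	// root node, so the insert/directory recursion below terminates there.)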
+	parent, err := directory(fsys, path.Dir(n.path))
+	if err != nil {
+		return err
+	}
+	parent.entries = append(parent.entries, n)
+	return nil
+}
+
+// directory returns the directory node with the path dir and lazily creates it
+// if it does not exist.
+func directory(fsys *filesystem, dir string) (*node, error) {
+	if m := fsys.nodes[dir]; m != nil && m.IsDir() {
+		return m, nil // pre-existing directory
+	}
+
+	n := &node{fileinfo: fileinfo{path: dir, mode: readOnlyDir}}
+	if err := insert(fsys, n); err != nil {
+		return nil, err
+	}
+	return n, nil
+}
+
+// dataOf returns the data associated with the file n.
+// May return ErrModified if fsys.ar has been modified.
+func dataOf(fsys *filesystem, n *node) ([]byte, error) {
+	if n.idx >= len(fsys.ar.Files) {
+		return nil, ErrModified
+	}
+
+	f := fsys.ar.Files[n.idx]
+	if f.Name != n.path || len(f.Data) != n.size {
+		return nil, ErrModified
+	}
+	return f.Data, nil
+}
+
+func (fsys *filesystem) Open(name string) (fs.File, error) {
+	if !fs.ValidPath(name) {
+		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
+	}
+
+	n := fsys.nodes[name]
+	switch {
+	case n == nil:
+		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+	case n.IsDir():
+		return &openDir{fileinfo: n.fileinfo, entries: n.entries}, nil
+	default:
+		data, err := dataOf(fsys, n)
+		if err != nil {
+			return nil, err
+		}
+		return &openFile{fileinfo: n.fileinfo, data: data}, nil
+	}
+}
+
+func (fsys *filesystem) ReadFile(name string) ([]byte, error) {
+	file, err := fsys.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	if file, ok := file.(*openFile); ok {
+		// TODO: use slices.Clone once x/tools has 1.21 available.
+		cp := make([]byte, file.size)
+		copy(cp, file.data)
+		return cp, err
+	}
+	return nil, &fs.PathError{Op: "read", Path: name, Err: fs.ErrInvalid}
+}
+
+// A fileinfo implements fs.FileInfo and fs.DirEntry for a given archive file.
+type fileinfo struct {
+	path string // unique path to the file or directory within a filesystem
+	size int
+	mode fs.FileMode
+}
+
+var _ fs.FileInfo = (*fileinfo)(nil)
+var _ fs.DirEntry = (*fileinfo)(nil)
+
+func (i *fileinfo) Name() string               { return path.Base(i.path) }
+func (i *fileinfo) Size() int64                { return int64(i.size) }
+func (i *fileinfo) Mode() fs.FileMode          { return i.mode }
+func (i *fileinfo) Type() fs.FileMode          { return i.mode.Type() }
+func (i *fileinfo) ModTime() time.Time         { return time.Time{} }
+func (i *fileinfo) IsDir() bool                { return i.mode&fs.ModeDir != 0 }
+func (i *fileinfo) Sys() any                   { return nil }
+func (i *fileinfo) Info() (fs.FileInfo, error) { return i, nil }
+
+// An openFile is a regular (non-directory) fs.File open for reading.
+type openFile struct {
+	fileinfo
+	data   []byte
+	offset int64
+}
+
+var _ fs.File = (*openFile)(nil)
+
+func (f *openFile) Stat() (fs.FileInfo, error) { return &f.fileinfo, nil }
+func (f *openFile) Close() error               { return nil }
+func (f *openFile) Read(b []byte) (int, error) {
+	if f.offset >= int64(len(f.data)) {
+		return 0, io.EOF
+	}
+	if f.offset < 0 {
+		return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
+	}
+	n := copy(b, f.data[f.offset:])
+	f.offset += int64(n)
+	return n, nil
+}
+
+func (f *openFile) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	case 0:
+		// offset += 0
+	case 1:
+		offset += f.offset
+	case 2:
+		offset += int64(len(f.data))
+	}
+	if offset < 0 || offset > int64(len(f.data)) {
+		return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid}
+	}
+	f.offset = offset
+	return offset, nil
+}
+
+func (f *openFile) ReadAt(b []byte, offset int64) (int, error) {
+	if offset < 0 || offset > int64(len(f.data)) {
+		return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
+	}
+	n := copy(b, f.data[offset:])
+	if n < len(b) {
+		return n, io.EOF
+	}
+	return n, nil
+}
+
+// An openDir is a directory fs.File (so also an fs.ReadDirFile) open for reading.
+type openDir struct {
+	fileinfo
+	entries []fs.DirEntry
+	offset  int
+}
+
+var _ fs.ReadDirFile = (*openDir)(nil)
+
+func (d *openDir) Stat() (fs.FileInfo, error) { return &d.fileinfo, nil }
+func (d *openDir) Close() error               { return nil }
+func (d *openDir) Read(b []byte) (int, error) {
+	return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}
+}
+
+func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
+	n := len(d.entries) - d.offset
+	if n == 0 && count > 0 {
+		return nil, io.EOF
+	}
+	if count > 0 && n > count {
+		n = count
+	}
+	list := make([]fs.DirEntry, n)
+	copy(list, d.entries[d.offset:d.offset+n])
+	d.offset += n
+	return list, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 897a583d5da..0c114d4e31d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -905,6 +905,22 @@ github.com/russross/blackfriday/v2
 # github.com/schollz/peerdiscovery v1.7.0
 ## explicit; go 1.13
 github.com/schollz/peerdiscovery
+# github.com/segmentio/asm v1.1.3
+## explicit; go 1.17
+github.com/segmentio/asm/ascii
+github.com/segmentio/asm/base64
+github.com/segmentio/asm/cpu
+github.com/segmentio/asm/cpu/arm
+github.com/segmentio/asm/cpu/arm64
+github.com/segmentio/asm/cpu/cpuid
+github.com/segmentio/asm/cpu/x86
+github.com/segmentio/asm/internal/unsafebytes
+github.com/segmentio/asm/keyset
+# github.com/segmentio/encoding v0.3.4
+## explicit; go 1.14
+github.com/segmentio/encoding/ascii
+github.com/segmentio/encoding/iso8601
+github.com/segmentio/encoding/json
 # github.com/shirou/gopsutil v3.21.11+incompatible
 ## explicit
 github.com/shirou/gopsutil/cpu
@@ -1145,6 +1161,18 @@ github.com/zenthangplus/goccm
 # go.etcd.io/bbolt v1.3.6
 ## explicit; go 1.12
 go.etcd.io/bbolt
+# go.lsp.dev/jsonrpc2 v0.10.0
+## explicit; go 1.17
+go.lsp.dev/jsonrpc2
+# go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2
+## explicit; go 1.15
+go.lsp.dev/pkg/xcontext
+# go.lsp.dev/protocol v0.12.0
+## explicit; go 1.17
+go.lsp.dev/protocol
+# go.lsp.dev/uri v0.3.0
+## explicit; go 1.13
+go.lsp.dev/uri
 # go.uber.org/atomic v1.11.0
 ## explicit; go 1.18
 go.uber.org/atomic
@@ -1290,6 +1318,13 @@ golang.org/x/time/rate
 ## explicit; go 1.19
 golang.org/x/tools/cmd/goimports
 golang.org/x/tools/cover
+golang.org/x/tools/go/analysis
+golang.org/x/tools/go/analysis/analysistest
+golang.org/x/tools/go/analysis/internal/analysisflags +golang.org/x/tools/go/analysis/internal/checker +golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/analysis/singlechecker +golang.org/x/tools/go/analysis/unitchecker golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata @@ -1297,20 +1332,28 @@ golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports golang.org/x/tools/internal/aliases +golang.org/x/tools/internal/analysisinternal +golang.org/x/tools/internal/diff +golang.org/x/tools/internal/diff/lcs golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/facts golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/goroot golang.org/x/tools/internal/imports golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/robustio golang.org/x/tools/internal/stdlib +golang.org/x/tools/internal/testenv golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions +golang.org/x/tools/txtar # google.golang.org/protobuf v1.34.2 ## explicit; go 1.20 google.golang.org/protobuf/cmd/protoc-gen-go diff --git a/waku/api.go b/waku/api.go index 5ec6f9ef004..51a645efe9b 100644 --- a/waku/api.go +++ b/waku/api.go @@ -28,11 +28,11 @@ import ( "go.uber.org/zap" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/waku/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rpc" gocommon "github.com/status-im/status-go/common" @@ -412,7 +412,7 @@ func (api *PublicWakuAPI) Messages(ctx context.Context, crit Criteria) (*rpc.Sub if filter := api.w.GetFilter(id); filter != nil { for _, rpcMessage := range toMessage(filter.Retrieve()) { if err := notifier.Notify(rpcSub.ID, rpcMessage); err != nil { - log.Error("Failed to send notification", "err", err) + logutils.ZapLogger().Error("Failed to send notification", zap.Error(err)) } } } diff --git a/waku/common/filter.go b/waku/common/filter.go index 7cef01e9a40..65811c3a4ee 100644 --- a/waku/common/filter.go +++ b/waku/common/filter.go @@ -23,9 +23,11 @@ import ( "fmt" "sync" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) // Filter represents a Waku message filter @@ -181,7 +183,7 @@ func (fs *Filters) NotifyWatchers(env *Envelope, p2pMessage bool) bool { candidates := fs.GetWatchersByTopic(env.Topic) for _, watcher := range candidates { if p2pMessage && !watcher.AllowP2P { - log.Trace(fmt.Sprintf("msg [%x], filter [%s]: p2p messages are not allowed", env.Hash(), watcher.id)) + logutils.ZapLogger().Debug(fmt.Sprintf("msg [%x], filter [%s]: p2p messages are not allowed", env.Hash(), watcher.id)) continue } @@ -193,16 +195,22 @@ func (fs *Filters) NotifyWatchers(env *Envelope, p2pMessage bool) bool { if match { msg = env.Open(watcher) if msg == nil { - log.Trace("processing message: failed to open", "message", env.Hash().Hex(), "filter", watcher.id) + logutils.ZapLogger().Debug("processing message: failed to open", + 
zap.String("message", env.Hash().Hex()), + zap.String("filter", watcher.id), + ) } } else { - log.Trace("processing message: does not match", "message", env.Hash().Hex(), "filter", watcher.id) + logutils.ZapLogger().Debug("processing message: does not match", + zap.String("message", env.Hash().Hex()), + zap.String("filter", watcher.id), + ) } } if match && msg != nil { msg.P2P = p2pMessage - log.Trace("processing message: decrypted", "hash", env.Hash().Hex()) + logutils.ZapLogger().Debug("processing message: decrypted", zap.Stringer("hash", env.Hash())) if watcher.Src == nil || IsPubKeyEqual(msg.Src, watcher.Src) { watcher.Trigger(msg) } @@ -225,7 +233,7 @@ func (f *Filter) expectsSymmetricEncryption() bool { func (f *Filter) Trigger(msg *ReceivedMessage) { err := f.Messages.Add(msg) if err != nil { - log.Error("failed to add msg into the filters store", "hash", msg.EnvelopeHash, "error", err) + logutils.ZapLogger().Error("failed to add msg into the filters store", zap.Stringer("hash", msg.EnvelopeHash), zap.Error(err)) } } @@ -235,7 +243,7 @@ func (f *Filter) Retrieve() []*ReceivedMessage { msgs, err := f.Messages.Pop() if err != nil { - log.Error("failed to retrieve messages from filter store", "error", err) + logutils.ZapLogger().Error("failed to retrieve messages from filter store", zap.Error(err)) return nil } return msgs diff --git a/waku/common/message.go b/waku/common/message.go index 63f89fae8d0..4817b296613 100644 --- a/waku/common/message.go +++ b/waku/common/message.go @@ -30,10 +30,12 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/ecies" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) // MessageParams specifies the exact way a message should be wrapped @@ -211,7 +213,7 @@ func (msg *SentMessage) appendPadding(params *MessageParams) error { func (msg *SentMessage) sign(key *ecdsa.PrivateKey) error { if IsMessageSigned(msg.Raw[0]) { // this should not happen, but no reason to panic - log.Error("failed to sign the message: already signed") + logutils.ZapLogger().Error("failed to sign the message: already signed") return nil } @@ -371,7 +373,7 @@ func (msg *ReceivedMessage) SigToPubKey() *ecdsa.PublicKey { pub, err := crypto.SigToPub(msg.hash(), msg.Signature) if err != nil { - log.Error("failed to recover public key from signature", "err", err) + logutils.ZapLogger().Error("failed to recover public key from signature", zap.Error(err)) return nil } return pub diff --git a/waku/v1/peer.go b/waku/v1/peer.go index 196d0e0ace4..9882e62d7cb 100644 --- a/waku/v1/peer.go +++ b/waku/v1/peer.go @@ -481,6 +481,8 @@ func (p *Peer) handshake() error { // update executes periodic operations on the peer, including message transmission // and expiration. 
func (p *Peer) update() { + defer gocommon.LogOnPanic() + // Start the tickers for the updates expire := time.NewTicker(common.ExpirationCycle) transmit := time.NewTicker(common.TransmissionCycle) diff --git a/wakuv2/api.go b/wakuv2/api.go index 49be14a32c1..09ea54ae03b 100644 --- a/wakuv2/api.go +++ b/wakuv2/api.go @@ -26,14 +26,16 @@ import ( "sync" "time" + "go.uber.org/zap" + "github.com/waku-org/go-waku/waku/v2/payload" "github.com/waku-org/go-waku/waku/v2/protocol/pb" + "github.com/status-im/status-go/logutils" "github.com/status-im/status-go/wakuv2/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "google.golang.org/protobuf/proto" @@ -360,7 +362,7 @@ func (api *PublicWakuAPI) Messages(ctx context.Context, crit Criteria) (*rpc.Sub if filter := api.w.GetFilter(id); filter != nil { for _, rpcMessage := range toMessage(filter.Retrieve()) { if err := notifier.Notify(rpcSub.ID, rpcMessage); err != nil { - log.Error("Failed to send notification", "err", err) + logutils.ZapLogger().Error("Failed to send notification", zap.Error(err)) } } } diff --git a/wakuv2/common/filter.go b/wakuv2/common/filter.go index 7630a162ecd..fdfeef6b481 100644 --- a/wakuv2/common/filter.go +++ b/wakuv2/common/filter.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/logutils" ) // Filter represents a Waku message filter @@ -231,7 +231,11 @@ func (fs *Filters) NotifyWatchers(recvMessage *ReceivedMessage) bool { candidates := fs.GetWatchersByTopic(recvMessage.PubsubTopic, recvMessage.ContentTopic) if len(candidates) == 0 { - log.Debug("no filters available for this topic", "message", recvMessage.Hash().Hex(), "pubsubTopic", recvMessage.PubsubTopic, "contentTopic", recvMessage.ContentTopic.String()) + logutils.ZapLogger().Debug("no filters available for this topic", + zap.Stringer("message", recvMessage.Hash()), + zap.String("pubsubTopic", recvMessage.PubsubTopic), + zap.Stringer("contentTopic", &recvMessage.ContentTopic), + ) } for _, watcher := range candidates { @@ -239,14 +243,17 @@ func (fs *Filters) NotifyWatchers(recvMessage *ReceivedMessage) bool { if decodedMsg == nil { decodedMsg = recvMessage.Open(watcher) if decodedMsg == nil { - log.Debug("processing message: failed to open", "message", recvMessage.Hash().Hex(), "filter", watcher.id) + logutils.ZapLogger().Debug("processing message: failed to open", + zap.Stringer("message", recvMessage.Hash()), + zap.String("filter", watcher.id), + ) continue } } if watcher.MatchMessage(decodedMsg) { matched = true - log.Debug("processing message: decrypted", "envelopeHash", recvMessage.Hash().Hex()) + logutils.ZapLogger().Debug("processing message: decrypted", zap.Stringer("envelopeHash", recvMessage.Hash())) if watcher.Src == nil || IsPubKeyEqual(decodedMsg.Src, watcher.Src) { watcher.Trigger(decodedMsg) } @@ -269,7 +276,10 @@ func (f *Filter) expectsSymmetricEncryption() bool { func (f *Filter) Trigger(msg *ReceivedMessage) { err := f.Messages.Add(msg) if err != nil { - log.Error("failed to add msg into the filters store", "hash", msg.Hash(), "error", err) + logutils.ZapLogger().Error("failed to add msg into the filters store", + zap.Stringer("hash", msg.Hash()), + zap.Error(err), + ) } } @@ -278,7 +288,7 @@ func (f *Filter) Trigger(msg *ReceivedMessage) { func (f *Filter) Retrieve() []*ReceivedMessage { 
msgs, err := f.Messages.Pop() if err != nil { - log.Error("failed to retrieve messages from filter store", "error", err) + logutils.ZapLogger().Error("failed to retrieve messages from filter store", zap.Error(err)) return nil } return msgs diff --git a/wakuv2/common/helpers.go b/wakuv2/common/helpers.go index 887eeab9edc..1c3d0557fb1 100644 --- a/wakuv2/common/helpers.go +++ b/wakuv2/common/helpers.go @@ -6,6 +6,10 @@ import ( "errors" "fmt" mrand "math/rand" + "regexp" + "strings" + + "github.com/multiformats/go-multiaddr" "github.com/ethereum/go-ethereum/common" ) @@ -110,3 +114,126 @@ func ValidateDataIntegrity(k []byte, expectedSize int) bool { } return true } + +func ParseDialErrors(errMsg string) []DialError { + // Regular expression to match the array of failed dial attempts + re := regexp.MustCompile(`all dials failed\n((?:\s*\*\s*\[.*\].*\n?)+)`) + + match := re.FindStringSubmatch(errMsg) + if len(match) < 2 { + return nil + } + + // Split the matched string into individual dial attempts + dialAttempts := strings.Split(strings.TrimSpace(match[1]), "\n") + + // Regular expression to extract multiaddr and error message + reAttempt := regexp.MustCompile(`\[(.*?)\]\s*(.*)`) + + var dialErrors []DialError + for _, attempt := range dialAttempts { + attempt = strings.TrimSpace(strings.Trim(attempt, "* ")) + matches := reAttempt.FindStringSubmatch(attempt) + if len(matches) != 3 { + continue + } + errMsg := strings.TrimSpace(matches[2]) + ma, err := multiaddr.NewMultiaddr(matches[1]) + if err != nil { + continue + } + protocols := ma.Protocols() + protocolsStr := "/" + for i, protocol := range protocols { + protocolsStr += protocol.Name + if i < len(protocols)-1 { + protocolsStr += "/" + } + } + dialErrors = append(dialErrors, DialError{ + Protocols: protocolsStr, + MultiAddr: matches[1], + ErrMsg: errMsg, + ErrType: CategorizeDialError(errMsg), + }) + + } + + return dialErrors +} + +// DialErrorType represents the type of dial error +type DialErrorType int + +const ( + ErrorUnknown DialErrorType = iota + ErrorIOTimeout + ErrorConnectionRefused + ErrorRelayCircuitFailed + ErrorRelayNoReservation + ErrorSecurityNegotiationFailed + ErrorConcurrentDialSucceeded + ErrorConcurrentDialFailed + ErrorConnectionsPerIPLimitExceeded + ErrorStreamReset + ErrorRelayResourceLimitExceeded + ErrorOpeningHopStreamToRelay + ErrorDialBackoff +) + +func (det DialErrorType) String() string { + return [...]string{ + "Unknown", + "I/O Timeout", + "Connection Refused", + "Relay Circuit Failed", + "Relay No Reservation", + "Security Negotiation Failed", + "Concurrent Dial Succeeded", + "Concurrent Dial Failed", + "Connections Per IP Limit Exceeded", + "Stream Reset", + "Relay Resource Limit Exceeded", + "Error Opening Hop Stream to Relay", + "Dial Backoff", + }[det] +} + +func CategorizeDialError(errMsg string) DialErrorType { + switch { + case strings.Contains(errMsg, "i/o timeout"): + return ErrorIOTimeout + case strings.Contains(errMsg, "connect: connection refused"): + return ErrorConnectionRefused + case strings.Contains(errMsg, "error opening relay circuit: CONNECTION_FAILED"): + return ErrorRelayCircuitFailed + case strings.Contains(errMsg, "error opening relay circuit: NO_RESERVATION"): + return ErrorRelayNoReservation + case strings.Contains(errMsg, "failed to negotiate security protocol"): + return ErrorSecurityNegotiationFailed + case strings.Contains(errMsg, "concurrent active dial succeeded"): + return ErrorConcurrentDialSucceeded + case strings.Contains(errMsg, "concurrent active dial through the 
same relay failed"): + return ErrorConcurrentDialFailed + case strings.Contains(errMsg, "connections per ip limit exceeded"): + return ErrorConnectionsPerIPLimitExceeded + case strings.Contains(errMsg, "stream reset"): + return ErrorStreamReset + case strings.Contains(errMsg, "error opening relay circuit: RESOURCE_LIMIT_EXCEEDED"): + return ErrorRelayResourceLimitExceeded + case strings.Contains(errMsg, "error opening hop stream to relay: connection failed"): + return ErrorOpeningHopStreamToRelay + case strings.Contains(errMsg, "dial backoff"): + return ErrorDialBackoff + default: + return ErrorUnknown + } +} + +// DialError represents a single dial error with its multiaddr and error message +type DialError struct { + MultiAddr string + ErrMsg string + ErrType DialErrorType + Protocols string +} diff --git a/wakuv2/common/helpers_test.go b/wakuv2/common/helpers_test.go new file mode 100644 index 00000000000..7081d8954ae --- /dev/null +++ b/wakuv2/common/helpers_test.go @@ -0,0 +1,46 @@ +package common + +import ( + "testing" +) + +var testCases = []struct { + errorString string + errorTypes []DialErrorType +}{ + { + errorString: "failed to dial: failed to dial 16Uiu2HAmNFvubdwLtyScgQKMVL7Ppwvd7RZskgThtPAGqMrUfs1V: all dials failed\n * [/ip4/0.0.0.0/tcp/55136] dial tcp4 0.0.0.0:60183->146.4.106.194:55136: i/o timeout", + errorTypes: []DialErrorType{ErrorIOTimeout}, + }, + { + errorString: "failed to dial: failed to dial 16Uiu2HAmC1BsqZfy9exnA3DiQHAo3gdAopTQRErLUjK8WoospTwq: all dials failed\n * [/ip4/0.0.0.0/tcp/46949] dial tcp4 0.0.0.0:60183->0.0.0.0:46949: i/o timeout\n * [/ip4/0.0.0.0/tcp/51063] dial tcp4 0.0.0.0:60183->0.0.0.0:51063: i/o timeout", + errorTypes: []DialErrorType{ErrorIOTimeout, ErrorIOTimeout}, + }, + { + errorString: "failed to dial: failed ito dial 16Uiu2HAkyjvXPmymR5eRnvxCufRGZdfRrgjME6bmn3Xo6aprE1eo: all dials failed\n * [/ip4/0.0.0.0/tcp/443/wss/p2p/16Uiu2HAmB7Ur9HQqo3cWDPovRQjo57fxWWDaQx27WxSzDGhN4JKg/p2p-circuit] error opening relay circuit: CONNECTION_FAILED (203)\n * [/ip4/0.0.0.0/tcp/30303/p2p/16Uiu2HAmB7Ur9HQqo3cWDPovRQjo57fxWWDaQx27WxSzDGhN4JKg/p2p-circuit] concurrent active dial through the same relay failed with a protocol error\n * [/ip4/0.0.0.0/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT/p2p-circuit] error opening relay circuit: CONNECTION_FAILED (203)\n * [/ip4/0.0.0.0/tcp/443/wss/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT/p2p-circuit] concurrent active dial through the same relay failed with a protocol error", + errorTypes: []DialErrorType{ErrorRelayCircuitFailed, ErrorConcurrentDialFailed, ErrorRelayCircuitFailed, ErrorConcurrentDialFailed}, + }, + { + errorString: "failed to dial: failed to dial 16Uiu2HAm9QijC9d2GsGKPLLF7cZXMFEadqvN7FqhFJ2z5jdW6AFY: all dials failed\n * [/ip4/0.0.0.0/tcp/64012] dial tcp4 0.0.0.0:64012: connect: connection refused", + errorTypes: []DialErrorType{ErrorConnectionRefused}, + }, + { + errorString: "failed to dial: failed to dial 16Uiu2HAm7jXmopqB6BUJAQH1PKcZULfSKgj9rC9pyBRKwJGTiRHf: all dials failed\n * [/ip4/34.135.13.87/tcp/30303/p2p/16Uiu2HAm8mUZ18tBWPXDQsaF7PbCKYA35z7WB2xNZH2EVq1qS8LJ/p2p-circuit] error opening relay circuit: NO_RESERVATION (204)\n * [/ip4/34.170.192.39/tcp/30303/p2p/16Uiu2HAmMELCo218hncCtTvC2Dwbej3rbyHQcR8erXNnKGei7WPZ/p2p-circuit] error opening relay circuit: NO_RESERVATION (204)\n * [/ip4/178.72.78.116/tcp/42841] dial tcp4 0.0.0.0:60183->178.72.78.116:42841: i/o timeout", + errorTypes: []DialErrorType{ErrorRelayNoReservation, ErrorRelayNoReservation, 
ErrorIOTimeout},
+	},
+	{
+		errorString: "failed to dial: failed to dial 16Uiu2HAmMUYpufreYsUBo4A56BQDnbMwN4mhP3wMWTM4reS8ivxd: all dials failed\n * [/ip4/0.0.0.0/tcp/52957] unknown",
+		errorTypes: []DialErrorType{ErrorUnknown},
+	},
+}
+
+func TestParseDialErrors(t *testing.T) {
+	for _, testCase := range testCases {
+		parsedErrors := ParseDialErrors(testCase.errorString)
+		// Fail loudly if parsing yields a different number of errors than
+		// expected; otherwise the loop below passes vacuously when
+		// ParseDialErrors returns nil or too few entries.
+		if len(parsedErrors) != len(testCase.errorTypes) {
+			t.Fatalf("Expected %d dial errors, got %d", len(testCase.errorTypes), len(parsedErrors))
+		}
+		for i, err := range parsedErrors {
+			if err.ErrType != testCase.errorTypes[i] {
+				t.Errorf("Expected error type %v, got %v", testCase.errorTypes[i], err.ErrType)
+			}
+		}
+	}
+}
diff --git a/wakuv2/common/message.go b/wakuv2/common/message.go
index f58f36c0abe..58521ae75c7 100644
--- a/wakuv2/common/message.go
+++ b/wakuv2/common/message.go
@@ -6,12 +6,15 @@ import (
 	"sync/atomic"
 	"time"
 
+	"go.uber.org/zap"
+
 	"github.com/waku-org/go-waku/waku/v2/payload"
 	"github.com/waku-org/go-waku/waku/v2/protocol"
 
+	"github.com/status-im/status-go/logutils"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/log"
 )
 
 // MessageType represents where this message comes from
@@ -105,7 +108,10 @@ type MemoryMessageStore struct {
 func NewReceivedMessage(env *protocol.Envelope, msgType MessageType) *ReceivedMessage {
 	ct, err := ExtractTopicFromContentTopic(env.Message().ContentTopic)
 	if err != nil {
-		log.Debug("failed to extract content topic from message", "topic", env.Message().ContentTopic, "err", err)
+		logutils.ZapLogger().Debug("failed to extract content topic from message",
+			zap.String("topic", env.Message().ContentTopic),
+			zap.Error(err),
+		)
 		return nil
 	}
 
@@ -176,7 +182,7 @@ func (msg *ReceivedMessage) Open(watcher *Filter) (result *ReceivedMessage) {
 
 	raw, err := payload.DecodePayload(msg.Envelope.Message(), keyInfo)
 	if err != nil {
-		log.Error("failed to decode message", "err", err)
+		logutils.ZapLogger().Error("failed to decode message", zap.Error(err))
 		return nil
 	}
 
@@ -191,7 +197,7 @@ func (msg *ReceivedMessage) Open(watcher *Filter) (result *ReceivedMessage) {
 
 	ct, err := ExtractTopicFromContentTopic(msg.Envelope.Message().ContentTopic)
 	if err != nil {
-		log.Error("failed to decode message", "err", err)
+		logutils.ZapLogger().Error("failed to decode message", zap.Error(err))
 		return nil
 	}
 
diff --git a/wakuv2/waku.go b/wakuv2/waku.go
index 43c0626bc91..db9d3d7c7ce 100644
--- a/wakuv2/waku.go
+++ b/wakuv2/waku.go
@@ -113,6 +113,10 @@ type ITelemetryClient interface {
 	PushMessageCheckFailure(ctx context.Context, messageHash string)
 	PushPeerCountByShard(ctx context.Context, peerCountByShard map[uint16]uint)
 	PushPeerCountByOrigin(ctx context.Context, peerCountByOrigin map[wps.Origin]uint)
+	PushDialFailure(ctx context.Context, dialFailure common.DialError)
+	PushMissedMessage(ctx context.Context, envelope *protocol.Envelope)
+	PushMissedRelevantMessage(ctx context.Context, message *common.ReceivedMessage)
+	PushMessageDeliveryConfirmed(ctx context.Context, messageHash string)
 }
 
 // Waku represents a dark communication interface through the Ethereum
@@ -196,7 +200,10 @@ func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) {
 
 func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] {
 	cache := ttlcache.New[gethcommon.Hash, *common.ReceivedMessage](ttlcache.WithTTL[gethcommon.Hash, *common.ReceivedMessage](cacheTTL))
-	go cache.Start()
+	go func() {
+		defer gocommon.LogOnPanic()
+		cache.Start()
+	}()
 	return cache
 }
 
@@ -1023,6 +1030,11 @@ func (w *Waku) SkipPublishToTopic(value bool) {
 
 func (w *Waku) ConfirmMessageDelivered(hashes
[]gethcommon.Hash) {
 	w.messageSender.MessagesDelivered(hashes)
+	if w.statusTelemetryClient != nil {
+		for _, hash := range hashes {
+			w.statusTelemetryClient.PushMessageDeliveryConfirmed(w.ctx, hash.String())
+		}
+	}
 }
 
 func (w *Waku) SetStorePeerID(peerID peer.ID) {
@@ -1146,12 +1158,24 @@ func (w *Waku) Start() error {
 		peerTelemetryTicker := time.NewTicker(peerTelemetryTickerInterval)
 		defer peerTelemetryTicker.Stop()
 
+		sub, err := w.node.Host().EventBus().Subscribe(new(utils.DialError))
+		if err != nil {
+			w.logger.Error("failed to subscribe to dial errors", zap.Error(err))
+			return
+		}
+		defer sub.Close()
+
 		for {
 			select {
 			case <-w.ctx.Done():
 				return
 			case <-peerTelemetryTicker.C:
 				w.reportPeerMetrics()
+			case dialErr := <-sub.Out():
+				// Guard like the other telemetry call sites: the client may be unset.
+				if w.statusTelemetryClient != nil {
+					dialErrors := common.ParseDialErrors(dialErr.(utils.DialError).Err.Error())
+					for _, dialError := range dialErrors {
+						w.statusTelemetryClient.PushDialFailure(w.ctx, common.DialError{ErrType: dialError.ErrType, ErrMsg: dialError.ErrMsg, Protocols: dialError.Protocols})
+					}
+				}
 			}
 		}
 	}()
@@ -1166,7 +1190,6 @@ func (w *Waku) Start() error {
 	go w.runPeerExchangeLoop()
 
 	if w.cfg.EnableMissingMessageVerification {
-
 		w.missingMsgVerifier = missing.NewMissingMessageVerifier(
 			w.node.Store(),
 			w,
@@ -1219,7 +1242,10 @@ func (w *Waku) Start() error {
 	w.wg.Add(1)
 	go w.broadcast()
 
-	go w.sendQueue.Start(w.ctx)
+	go func() {
+		defer gocommon.LogOnPanic()
+		w.sendQueue.Start(w.ctx)
+	}()
 
 	err = w.startMessageSender()
 	if err != nil {
@@ -1444,6 +1470,12 @@ func (w *Waku) OnNewEnvelopes(envelope *protocol.Envelope, msgType common.Messag
 		return nil
 	}
 
+	if w.statusTelemetryClient != nil {
+		if msgType == common.MissingMessageType {
+			w.statusTelemetryClient.PushMissedMessage(w.ctx, envelope)
+		}
+	}
+
 	logger := w.logger.With(
 		zap.String("messageType", msgType),
 		zap.Stringer("envelopeHash", envelope.Hash()),
@@ -1562,6 +1594,9 @@ func (w *Waku) processMessage(e *common.ReceivedMessage) {
 		w.storeMsgIDsMu.Unlock()
 	} else {
 		logger.Debug("filters did match")
+		if w.statusTelemetryClient != nil && e.MsgType == common.MissingMessageType {
+			w.statusTelemetryClient.PushMissedRelevantMessage(w.ctx, e)
+		}
 		e.Processed.Store(true)
 	}
 
@@ -1718,7 +1753,10 @@ func (w *Waku) ConnectionChanged(state connection.State) {
 	isOnline := !state.Offline
 	if w.cfg.LightClient {
 		//TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114
-		go w.filterManager.OnConnectionStatusChange("", isOnline)
+		go func() {
+			defer gocommon.LogOnPanic()
+			w.filterManager.OnConnectionStatusChange("", isOnline)
+		}()
 		w.handleNetworkChangeFromApp(state)
 	} else {
 		// for lightClient state update and onlineChange is handled in filterManager.
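Note on the pattern above: nearly every bare `go` statement touched by this diff now defers gocommon.LogOnPanic before doing any work, so a panic inside the goroutine is reported through the shared logger instead of escaping unobserved. A minimal sketch of the convention in isolation follows; the startBackground helper is illustrative and not part of this change set, and only gocommon.LogOnPanic comes from github.com/status-im/status-go/common:

	package util

	import (
		gocommon "github.com/status-im/status-go/common"
	)

	// startBackground runs fn on its own goroutine, deferring the
	// project's panic logger first, mirroring the convention applied
	// throughout this diff. Whether the panic is swallowed or re-raised
	// after logging is defined by gocommon.LogOnPanic itself.
	func startBackground(fn func()) {
		go func() {
			defer gocommon.LogOnPanic()
			fn()
		}()
	}
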
@@ -1787,7 +1825,12 @@ func (w *Waku) seedBootnodesForDiscV5() { } if !canQuery() { - w.logger.Info("can't query bootnodes", zap.Int("peer-count", len(w.node.Host().Network().Peers())), zap.Int64("lastTry", lastTry), zap.Int64("now", now()), zap.Int64("backoff", bootnodesQueryBackoffMs*int64(math.Exp2(float64(retries)))), zap.Int("retries", retries)) + w.logger.Info("can't query bootnodes", + zap.Int("peer-count", len(w.node.Host().Network().Peers())), + zap.Int64("lastTry", lastTry), zap.Int64("now", now()), + zap.Int64("backoff", bootnodesQueryBackoffMs*int64(math.Exp2(float64(retries)))), + zap.Int("retries", retries), + ) continue } diff --git a/wakuv2/waku_test.go b/wakuv2/waku_test.go index 9792e50828a..87ff4c2e099 100644 --- a/wakuv2/waku_test.go +++ b/wakuv2/waku_test.go @@ -11,8 +11,6 @@ import ( "testing" "time" - "go.uber.org/zap" - "github.com/cenkalti/backoff/v3" "github.com/libp2p/go-libp2p/core/metrics" "github.com/libp2p/go-libp2p/core/peer" @@ -332,8 +330,7 @@ func makeTestTree(domain string, nodes []*enode.Node, links []string) (*ethdnsdi } func TestPeerExchange(t *testing.T) { - logger, err := zap.NewDevelopment() - require.NoError(t, err) + logger := tt.MustCreateTestLogger() // start node which serve as PeerExchange server config := &Config{} config.ClusterID = 16 @@ -683,8 +680,7 @@ func TestOnlineChecker(t *testing.T) { } func TestLightpushRateLimit(t *testing.T) { - logger, err := zap.NewDevelopment() - require.NoError(t, err) + logger := tt.MustCreateTestLogger() config0 := &Config{} setDefaultConfig(config0, false) @@ -784,10 +780,7 @@ func TestLightpushRateLimit(t *testing.T) { } func TestTelemetryFormat(t *testing.T) { - logger, err := zap.NewDevelopment() - require.NoError(t, err) - - tc := NewBandwidthTelemetryClient(logger, "#") + tc := NewBandwidthTelemetryClient(tt.MustCreateTestLogger(), "#") s := metrics.Stats{ TotalIn: 10, @@ -804,6 +797,6 @@ func TestTelemetryFormat(t *testing.T) { m[lightpush.LightPushID_v20beta1] = s requestBody := tc.getTelemetryRequestBody(m) - _, err = json.Marshal(requestBody) + _, err := json.Marshal(requestBody) require.NoError(t, err) }
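
For context, a hypothetical sketch of exercising the new dial-error parsing from wakuv2/common/helpers.go by hand; the peer ID and multiaddrs below are made up, and the string merely mimics the libp2p "all dials failed" format that the Start() event-bus subscription receives via utils.DialError events:

	package main

	import (
		"fmt"

		"github.com/status-im/status-go/wakuv2/common"
	)

	func main() {
		// Made-up libp2p-style swarm error in the shape ParseDialErrors expects.
		msg := "failed to dial: failed to dial 16Uiu2HAmExample: all dials failed\n" +
			" * [/ip4/192.0.2.1/tcp/30303] dial tcp4 192.0.2.1:30303: i/o timeout\n" +
			" * [/ip4/192.0.2.2/tcp/443] dial tcp4 192.0.2.2:443: connect: connection refused"

		for _, de := range common.ParseDialErrors(msg) {
			// ErrType prints via DialErrorType.String(), e.g. "I/O Timeout".
			fmt.Printf("%s %s: %s\n", de.Protocols, de.MultiAddr, de.ErrType)
		}
	}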